/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

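/*
 * The .c files below are included directly rather than linked, so this unit
 * test can exercise their static functions and override their external
 * dependencies with the stubs defined in this file.
 */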
#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

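/* Fake io_device handle backing the accel framework's I/O channel in these tests. */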
static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);
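/*
 * DEFINE_STUB(name, return_type, args, return_value) from spdk_internal/mock.h
 * emits a mock implementation whose return value defaults to the given value
 * and can be overridden via MOCK_SET() (DEFINE_STUB_V is the void variant).
 * As a rough sketch, with the default in place the stub above behaves like:
 *
 *	struct spdk_nvme_probe_ctx *
 *	spdk_nvme_probe_async(const struct spdk_nvme_transport_id *trid, void *cb_ctx,
 *			      spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
 *			      spdk_nvme_remove_cb remove_cb)
 *	{
 *		return NULL;
 *	}
 */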

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_numa_id, int32_t, (struct spdk_nvme_ctrlr *ctrlr),
	    SPDK_ENV_NUMA_ID_ANY);

DEFINE_STUB(spdk_nvme_qpair_get_id, uint16_t, (struct spdk_nvme_qpair *qpair), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);
DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL);
DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_nvme_scan_attached, int, (const struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB(spdk_nvme_poll_group_get_fd_group, struct spdk_fd_group *,
	    (struct spdk_nvme_poll_group *group), NULL);
DEFINE_STUB(spdk_nvme_poll_group_wait, int, (struct spdk_nvme_poll_group *group,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_admin_qp_fd, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_event_handler_opts *opts), 0);
DEFINE_STUB(spdk_nvme_poll_group_set_interrupt_callback, int,
	    (struct spdk_nvme_poll_group *group,
	     spdk_nvme_poll_group_interrupt_cb cb_fn, void *ctx), 0);
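/*
 * Hand-written mock: fill "domains" with a dummy pointer for as many entries
 * as the test requested via MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, n),
 * then return the mocked value through HANDLE_RETURN_MOCK().
 */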
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

#define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"

static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN};

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns),
	    SPDK_NVME_16B_GUARD_PI);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
	     struct iovec *dst_iovs, uint32_t dst_iovcnt,
	     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt,
	     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
	     spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_nvme_qpair_authenticate, int,
	    (struct spdk_nvme_qpair *qpair, spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
DEFINE_STUB(spdk_nvme_ctrlr_authenticate, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
DEFINE_STUB(spdk_nvme_ctrlr_set_keys, int,
	    (struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ctrlr_key_opts *opts), 0);

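/*
 * The NVMe driver's structs are opaque to its consumers, so this test supplies
 * minimal definitions that track only the state the mocks below need.
 */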
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint8_t				failure_reason;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	bool				is_removed;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

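/* Globals used to steer the attach/detach flow and to observe its results. */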
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

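/*
 * The ut_init_trid helpers below differ only in traddr, yielding three
 * distinct TCP paths to the same subsystem NQN for the multipath tests.
 */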
static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

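/*
 * Fabricate a controller with "num_ns" active namespaces and queue it on
 * g_ut_init_ctrlrs; a later spdk_nvme_probe_poll_async() call "discovers"
 * and attaches it.
 */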
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

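/*
 * Queue a fake request on the qpair; it completes with SPDK_NVME_SC_SUCCESS
 * (unless a test overrides the cpl) the next time the qpair is polled via
 * spdk_nvme_qpair_process_completions().
 */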
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN);
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint16_t
spdk_nvme_ctrlr_get_id(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->cdata.cntlid;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
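/* Build an ANA log page: a header followed by one single-NSID descriptor per active namespace. */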
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

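/*
 * Mock abort: mark the target request as aborted by request and queue an
 * ABORT completion on the admin qpair.
 */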
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_read_ext_called;
int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_read_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_write_ext_called;
int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_write_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.f.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

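/*
 * The tests below drive the bdev_nvme state machines with the helpers from
 * common/lib/ut_multithread.c: set_thread() selects the current SPDK thread,
 * poll_threads() runs all queued messages and pollers, and spdk_delay_us()
 * advances the mocked clock so that time-based pollers fire.
 */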
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

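	/* Step the reset state machine one transition at a time:
	 * poll_thread_times(i, n) runs thread i's pollers and messages n
	 * times, so the I/O qpairs are torn down and rebuilt in a controlled
	 * order.
	 */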
	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing the ctrlr while it is being reset; the destruct is deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so the destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test the single-trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test the two-trid case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1806 
/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
 * trid1 was lost, and resetting the ctrlr failed repeatedly before failover from
 * trid1 to trid2 started. While processing the failed reset, trid3 was added.
 * trid1 should have remained active, i.e., the head of the list, until the
 * failover completed. However, trid3 was mistakenly inserted at the head of
 * the list.
 *
 * I/O qpairs have a shorter polling period than the admin qpair. When a connection
 * fails, an I/O qpair may detect the error earlier than the admin qpair. An I/O
 * qpair error triggers ctrlr reset, while an admin qpair error triggers ctrlr
 * failover. Hence ctrlr reset may be executed repeatedly before failover is
 * executed, which makes this bug reachable in practice.
 *
 * The following test verifies the fix.
 */
1822 static void
1823 test_race_between_failover_and_add_secondary_trid(void)
1824 {
1825 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1826 	struct spdk_nvme_ctrlr ctrlr = {};
1827 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1828 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1829 	struct spdk_io_channel *ch1, *ch2;
1830 	int rc;
1831 
1832 	ut_init_trid(&trid1);
1833 	ut_init_trid2(&trid2);
1834 	ut_init_trid3(&trid3);
1835 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1836 
1837 	set_thread(0);
1838 
1839 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1840 	CU_ASSERT(rc == 0);
1841 
1842 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1843 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1844 
1845 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1846 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1847 
1848 	set_thread(1);
1849 
1850 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1851 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1852 
1853 	set_thread(0);
1854 
1855 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1856 	CU_ASSERT(rc == 0);
1857 
1858 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1859 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1860 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1861 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1862 	path_id2 = TAILQ_NEXT(path_id1, link);
1863 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1864 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1865 
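	/* Make the first reset fail so that path_id1 is marked as failed but
	 * remains the active path.
	 */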
1866 	ctrlr.fail_reset = true;
1867 
1868 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1869 	CU_ASSERT(rc == 0);
1870 
1871 	poll_threads();
1872 
1873 	CU_ASSERT(path_id1->last_failed_tsc != 0);
1874 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1875 
1876 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1877 	CU_ASSERT(rc == 0);
1878 
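	/* Add trid3 while the failed reset is still being processed. trid3 must be
	 * appended after trid2, not inserted at the head of the list.
	 */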
1879 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1880 	CU_ASSERT(rc == 0);
1881 
1882 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1883 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1884 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1885 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1886 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1887 	path_id3 = TAILQ_NEXT(path_id2, link);
1888 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1889 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1890 
1891 	poll_threads();
1892 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1893 	poll_threads();
1894 
1895 	spdk_put_io_channel(ch1);
1896 
1897 	set_thread(1);
1898 
1899 	spdk_put_io_channel(ch2);
1900 
1901 	poll_threads();
1902 
1903 	set_thread(0);
1904 
1905 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1906 	CU_ASSERT(rc == 0);
1907 
1908 	poll_threads();
1909 	spdk_delay_us(1000);
1910 	poll_threads();
1911 
1912 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1913 }
1914 
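/* Attach callback shared by the following tests. The expected attach status and
 * bdev count are set by each test via g_ut_attach_ctrlr_status and
 * g_ut_attach_bdev_count.
 */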
1915 static void
1916 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1917 {
1918 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1919 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1920 }
1921 
1922 static void
1923 test_pending_reset(void)
1924 {
1925 	struct spdk_nvme_transport_id trid = {};
1926 	struct spdk_nvme_ctrlr *ctrlr;
1927 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
1928 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1929 	const int STRING_SIZE = 32;
1930 	const char *attached_names[STRING_SIZE];
1931 	struct nvme_bdev *nbdev;
1932 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1933 	struct spdk_io_channel *ch1, *ch2;
1934 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1935 	struct nvme_io_path *io_path1, *io_path2;
1936 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1937 	int rc;
1938 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
1939 
1940 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
1941 	bdev_opts.multipath = false;
1942 
1943 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1944 	ut_init_trid(&trid);
1945 
1946 	set_thread(0);
1947 
1948 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1949 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1950 
1951 	g_ut_attach_ctrlr_status = 0;
1952 	g_ut_attach_bdev_count = 1;
1953 
1954 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1955 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
1956 	CU_ASSERT(rc == 0);
1957 
1958 	spdk_delay_us(1000);
1959 	poll_threads();
1960 
1961 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1962 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1963 
1964 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1965 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
1966 
1967 	ch1 = spdk_get_io_channel(nbdev);
1968 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1969 
1970 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1971 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1972 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1973 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1974 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1975 
1976 	set_thread(1);
1977 
1978 	ch2 = spdk_get_io_channel(nbdev);
1979 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1980 
1981 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1982 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1983 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1984 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1985 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1986 
1987 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, nbdev, ch2);
1988 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1989 
1990 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, nbdev, ch1);
1991 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1992 
1993 	/* The first reset request is submitted on thread 1, and the second reset request
1994 	 * is submitted on thread 0 while processing the first request.
1995 	 */
1996 	bdev_nvme_submit_request(ch2, first_bdev_io);
1997 
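	/* Poll each thread partially so that the first reset has started but has
	 * not completed yet.
	 */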
1998 	poll_thread_times(0, 1);
1999 	poll_thread_times(1, 2);
2000 
2001 	CU_ASSERT(nvme_ctrlr->resetting == true);
2002 	CU_ASSERT(TAILQ_EMPTY(&nvme_ctrlr->pending_resets));
2003 
2004 	set_thread(0);
2005 
2006 	bdev_nvme_submit_request(ch1, second_bdev_io);
2007 
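	/* Step both threads so that the second reset request is queued to
	 * pending_resets instead of being executed.
	 */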
2008 	poll_thread_times(0, 1);
2009 	poll_thread_times(1, 1);
2010 	poll_thread_times(0, 2);
2011 	poll_thread_times(1, 1);
2012 	poll_thread_times(0, 1);
2013 
2014 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&nvme_ctrlr->pending_resets)) == second_bdev_io);
2015 
2016 	poll_threads();
2017 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2018 	poll_threads();
2019 
2020 	CU_ASSERT(nvme_ctrlr->resetting == false);
2021 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2022 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2023 
	/* As above, the first reset request is submitted on thread 1, and the second
	 * reset request is submitted on thread 0 while the first is being processed.
	 *
	 * The difference from the above scenario is that resetting the ctrlr fails
	 * while processing the first request. Hence both reset requests should fail.
	 */
2030 	set_thread(1);
2031 
2032 	bdev_nvme_submit_request(ch2, first_bdev_io);
2033 
2034 	poll_thread_times(0, 1);
2035 	poll_thread_times(1, 2);
2036 
2037 	CU_ASSERT(nvme_ctrlr->resetting == true);
2038 	CU_ASSERT(TAILQ_EMPTY(&nvme_ctrlr->pending_resets));
2039 
2040 	set_thread(0);
2041 
2042 	bdev_nvme_submit_request(ch1, second_bdev_io);
2043 
2044 	poll_thread_times(0, 1);
2045 	poll_thread_times(1, 1);
2046 	poll_thread_times(0, 2);
2047 	poll_thread_times(1, 1);
2048 	poll_thread_times(0, 1);
2049 
2050 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&nvme_ctrlr->pending_resets)) == second_bdev_io);
2051 
2052 	ctrlr->fail_reset = true;
2053 
2054 	poll_threads();
2055 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2056 	poll_threads();
2057 
2058 	CU_ASSERT(nvme_ctrlr->resetting == false);
2059 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2060 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2061 
2062 	spdk_put_io_channel(ch1);
2063 
2064 	set_thread(1);
2065 
2066 	spdk_put_io_channel(ch2);
2067 
2068 	poll_threads();
2069 
2070 	set_thread(0);
2071 
2072 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2073 	CU_ASSERT(rc == 0);
2074 
2075 	poll_threads();
2076 	spdk_delay_us(1000);
2077 	poll_threads();
2078 
2079 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2080 
2081 	free(first_bdev_io);
2082 	free(second_bdev_io);
2083 }
2084 
2085 static void
2086 test_attach_ctrlr(void)
2087 {
2088 	struct spdk_nvme_transport_id trid = {};
2089 	struct spdk_nvme_ctrlr *ctrlr;
2090 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2091 	struct nvme_ctrlr *nvme_ctrlr;
2092 	const int STRING_SIZE = 32;
2093 	const char *attached_names[STRING_SIZE];
2094 	struct nvme_bdev *nbdev;
2095 	int rc;
2096 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2097 
2098 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2099 	bdev_opts.multipath = false;
2100 
2101 	set_thread(0);
2102 
2103 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2104 	ut_init_trid(&trid);
2105 
	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is removed
	 * by probe polling.
	 */
2109 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2110 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2111 
2112 	ctrlr->is_failed = true;
2113 	g_ut_attach_ctrlr_status = -EIO;
2114 	g_ut_attach_bdev_count = 0;
2115 
2116 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2117 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2118 	CU_ASSERT(rc == 0);
2119 
2120 	spdk_delay_us(1000);
2121 	poll_threads();
2122 
2123 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2124 
	/* If the ctrlr has no namespace, an nvme_ctrlr with no namespace is created. */
2126 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2127 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2128 
2129 	g_ut_attach_ctrlr_status = 0;
2130 
2131 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2132 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2133 	CU_ASSERT(rc == 0);
2134 
2135 	spdk_delay_us(1000);
2136 	poll_threads();
2137 
2138 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2139 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2140 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2141 
2142 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2143 	CU_ASSERT(rc == 0);
2144 
2145 	poll_threads();
2146 	spdk_delay_us(1000);
2147 	poll_threads();
2148 
2149 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2150 
	/* If the ctrlr has one namespace, an nvme_ctrlr with one namespace and
	 * one nvme_bdev are created.
	 */
2154 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2155 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2156 
2157 	g_ut_attach_bdev_count = 1;
2158 
2159 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2160 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2161 	CU_ASSERT(rc == 0);
2162 
2163 	spdk_delay_us(1000);
2164 	poll_threads();
2165 
2166 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2167 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2168 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2169 
2170 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2171 	attached_names[0] = NULL;
2172 
2173 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2174 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2175 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2176 
2177 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2178 	CU_ASSERT(rc == 0);
2179 
2180 	poll_threads();
2181 	spdk_delay_us(1000);
2182 	poll_threads();
2183 
2184 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2185 
	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespace is
	 * created because registering the nvme_bdev fails.
	 */
2189 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2190 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2191 
2192 	g_ut_register_bdev_status = -EINVAL;
2193 	g_ut_attach_bdev_count = 0;
2194 
2195 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2196 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2197 	CU_ASSERT(rc == 0);
2198 
2199 	spdk_delay_us(1000);
2200 	poll_threads();
2201 
2202 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2203 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2204 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2205 
2206 	CU_ASSERT(attached_names[0] == NULL);
2207 
2208 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2209 	CU_ASSERT(rc == 0);
2210 
2211 	poll_threads();
2212 	spdk_delay_us(1000);
2213 	poll_threads();
2214 
2215 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2216 
2217 	g_ut_register_bdev_status = 0;
2218 }
2219 
2220 static void
2221 test_aer_cb(void)
2222 {
2223 	struct spdk_nvme_transport_id trid = {};
2224 	struct spdk_nvme_ctrlr *ctrlr;
2225 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2226 	struct nvme_ctrlr *nvme_ctrlr;
2227 	struct nvme_bdev *nbdev;
2228 	const int STRING_SIZE = 32;
2229 	const char *attached_names[STRING_SIZE];
2230 	union spdk_nvme_async_event_completion event = {};
2231 	struct spdk_nvme_cpl cpl = {};
2232 	int rc;
2233 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2234 
2235 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2236 	bdev_opts.multipath = false;
2237 
2238 	set_thread(0);
2239 
2240 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2241 	ut_init_trid(&trid);
2242 
	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd,
	 * and 4th namespaces are populated.
	 */
2246 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2247 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2248 
2249 	ctrlr->ns[0].is_active = false;
2250 
2251 	g_ut_attach_ctrlr_status = 0;
2252 	g_ut_attach_bdev_count = 3;
2253 
2254 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2255 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2256 	CU_ASSERT(rc == 0);
2257 
2258 	spdk_delay_us(1000);
2259 	poll_threads();
2260 
2261 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2262 	poll_threads();
2263 
2264 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2265 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2266 
2267 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2268 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2269 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2270 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2271 
2272 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2273 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2274 	CU_ASSERT(nbdev->disk.blockcnt == 1024);
2275 
	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace,
	 * and change the size of the 4th namespace.
	 */
2279 	ctrlr->ns[0].is_active = true;
2280 	ctrlr->ns[2].is_active = false;
2281 	ctrlr->nsdata[3].nsze = 2048;
2282 
2283 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2284 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2285 	cpl.cdw0 = event.raw;
2286 
2287 	aer_cb(nvme_ctrlr, &cpl);
2288 
2289 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2290 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2291 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2292 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2293 	CU_ASSERT(nbdev->disk.blockcnt == 2048);
2294 
2295 	/* Change ANA state of active namespaces. */
2296 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2297 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2298 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2299 
2300 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2301 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2302 	cpl.cdw0 = event.raw;
2303 
2304 	aer_cb(nvme_ctrlr, &cpl);
2305 
2306 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2307 	poll_threads();
2308 
2309 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2310 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2311 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2312 
2313 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2314 	CU_ASSERT(rc == 0);
2315 
2316 	poll_threads();
2317 	spdk_delay_us(1000);
2318 	poll_threads();
2319 
2320 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2321 }
2322 
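/* Submit an I/O of the given type and verify that it is issued through the qpair
 * and completes successfully.
 */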
2323 static void
2324 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2325 			enum spdk_bdev_io_type io_type)
2326 {
2327 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2328 	struct nvme_io_path *io_path;
2329 	struct spdk_nvme_qpair *qpair;
2330 
2331 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2332 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2333 	qpair = io_path->qpair->qpair;
2334 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2335 
2336 	bdev_io->type = io_type;
2337 	bdev_io->internal.f.in_submit_request = true;
2338 
2339 	bdev_nvme_submit_request(ch, bdev_io);
2340 
2341 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2342 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2343 
2344 	poll_threads();
2345 
2346 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2347 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2348 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2349 }
2350 
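/* Submit an I/O type that is completed inline without issuing an NVMe command,
 * e.g. flush.
 */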
2351 static void
2352 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2353 		   enum spdk_bdev_io_type io_type)
2354 {
2355 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2356 	struct nvme_io_path *io_path;
2357 	struct spdk_nvme_qpair *qpair;
2358 
2359 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2360 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2361 	qpair = io_path->qpair->qpair;
2362 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2363 
2364 	bdev_io->type = io_type;
2365 	bdev_io->internal.f.in_submit_request = true;
2366 
2367 	bdev_nvme_submit_request(ch, bdev_io);
2368 
2369 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2370 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2371 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2372 }
2373 
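/* Submit a compare-and-write and verify that it is issued as two fused NVMe
 * commands, a compare followed by a write.
 */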
2374 static void
2375 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2376 {
2377 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2378 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2379 	struct ut_nvme_req *req;
2380 	struct nvme_io_path *io_path;
2381 	struct spdk_nvme_qpair *qpair;
2382 
2383 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2384 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2385 	qpair = io_path->qpair->qpair;
2386 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2387 
	/* Only compare-and-write is tested as a fused command for now. */
2389 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2390 	bdev_io->internal.f.in_submit_request = true;
2391 
2392 	bdev_nvme_submit_request(ch, bdev_io);
2393 
2394 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2395 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2396 	CU_ASSERT(bio->first_fused_submitted == true);
2397 
2398 	/* First outstanding request is compare operation. */
2399 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2400 	SPDK_CU_ASSERT_FATAL(req != NULL);
2401 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2402 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2403 
2404 	poll_threads();
2405 
2406 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2407 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2408 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2409 }
2410 
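/* Submit an admin passthrough command. Admin commands complete on the thread
 * that created the ctrlr (thread 1 in this test), so completing the bdev_io
 * requires polling thread 1 and then thread 0.
 */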
2411 static void
2412 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2413 			 struct spdk_nvme_ctrlr *ctrlr)
2414 {
2415 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2416 	bdev_io->internal.f.in_submit_request = true;
2417 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2418 
2419 	bdev_nvme_submit_request(ch, bdev_io);
2420 
2421 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2422 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2423 
2424 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2425 	poll_thread_times(1, 1);
2426 
2427 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2428 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2429 
2430 	poll_thread_times(0, 1);
2431 
2432 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2433 }
2434 
2435 static void
2436 test_submit_nvme_cmd(void)
2437 {
2438 	struct spdk_nvme_transport_id trid = {};
2439 	struct spdk_nvme_ctrlr *ctrlr;
2440 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2441 	struct nvme_ctrlr *nvme_ctrlr;
2442 	const int STRING_SIZE = 32;
2443 	const char *attached_names[STRING_SIZE];
2444 	struct nvme_bdev *nbdev;
2445 	struct spdk_bdev_io *bdev_io;
2446 	struct spdk_io_channel *ch;
2447 	int rc;
2448 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2449 
2450 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2451 	bdev_opts.multipath = false;
2452 
2453 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2454 	ut_init_trid(&trid);
2455 
2456 	set_thread(1);
2457 
2458 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2459 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2460 
2461 	g_ut_attach_ctrlr_status = 0;
2462 	g_ut_attach_bdev_count = 1;
2463 
2464 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2465 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2466 	CU_ASSERT(rc == 0);
2467 
2468 	spdk_delay_us(1000);
2469 	poll_threads();
2470 
2471 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2472 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2473 
2474 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2475 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2476 
2477 	set_thread(0);
2478 
2479 	ch = spdk_get_io_channel(nbdev);
2480 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2481 
2482 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, nbdev, ch);
2483 
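	/* First submit a read without a data buffer to exercise the buffer
	 * allocation path.
	 */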
2484 	bdev_io->u.bdev.iovs = NULL;
2485 
2486 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2487 
2488 	ut_bdev_io_set_buf(bdev_io);
2489 
2490 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2491 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2492 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2493 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2494 
2495 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2496 
2497 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2498 
	/* Verify that the ext NVMe API is called when the data is described by a memory domain. */
2500 	g_ut_read_ext_called = false;
2501 	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
2502 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2503 	CU_ASSERT(g_ut_read_ext_called == true);
2504 	g_ut_read_ext_called = false;
2505 	bdev_io->u.bdev.memory_domain = NULL;
2506 
2507 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2508 
2509 	free(bdev_io);
2510 
2511 	spdk_put_io_channel(ch);
2512 
2513 	poll_threads();
2514 
2515 	set_thread(1);
2516 
2517 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2518 	CU_ASSERT(rc == 0);
2519 
2520 	poll_threads();
2521 	spdk_delay_us(1000);
2522 	poll_threads();
2523 
2524 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2525 }
2526 
2527 static void
2528 test_add_remove_trid(void)
2529 {
2530 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2531 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2532 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2533 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2534 	const int STRING_SIZE = 32;
2535 	const char *attached_names[STRING_SIZE];
2536 	struct nvme_path_id *ctrid;
2537 	int rc;
2538 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2539 
2540 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2541 	bdev_opts.multipath = false;
2542 
2543 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2544 	ut_init_trid(&path1.trid);
2545 	ut_init_trid2(&path2.trid);
2546 	ut_init_trid3(&path3.trid);
2547 
2548 	set_thread(0);
2549 
2550 	g_ut_attach_ctrlr_status = 0;
2551 	g_ut_attach_bdev_count = 0;
2552 
2553 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2554 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2555 
2556 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2557 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2558 	CU_ASSERT(rc == 0);
2559 
2560 	spdk_delay_us(1000);
2561 	poll_threads();
2562 
2563 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2564 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2565 
2566 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2567 
2568 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2569 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2570 
2571 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2572 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2573 	CU_ASSERT(rc == 0);
2574 
2575 	spdk_delay_us(1000);
2576 	poll_threads();
2577 
2578 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2579 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2580 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2581 			break;
2582 		}
2583 	}
2584 	CU_ASSERT(ctrid != NULL);
2585 
2586 	/* trid3 is not in the registered list. */
2587 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2588 	CU_ASSERT(rc == -ENXIO);
2589 
	/* trid2 is not in use, so it is simply removed. */
2591 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2592 	CU_ASSERT(rc == 0);
2593 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2594 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2595 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2596 	}
2597 
2598 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2599 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2600 
2601 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2602 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2603 	CU_ASSERT(rc == 0);
2604 
2605 	spdk_delay_us(1000);
2606 	poll_threads();
2607 
2608 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2609 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2610 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2611 			break;
2612 		}
2613 	}
2614 	CU_ASSERT(ctrid != NULL);
2615 
	/* Forcibly mark path3 as failed by setting its last_failed_tsc to a non-zero
	 * value. If we add path2 again, path2 should be inserted between path1 and
	 * path3. Then we remove path2 again. It is not in use, so it is simply removed.
	 */
2620 	ctrid->last_failed_tsc = spdk_get_ticks() + 1;
2621 
2622 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2623 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2624 
2625 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2626 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2627 	CU_ASSERT(rc == 0);
2628 
2629 	spdk_delay_us(1000);
2630 	poll_threads();
2631 
2632 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2633 
2634 	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
2635 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2636 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);
2637 
2638 	ctrid = TAILQ_NEXT(ctrid, link);
2639 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2640 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);
2641 
2642 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2643 	CU_ASSERT(rc == 0);
2644 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2645 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2646 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2647 	}
2648 
	/* path1 is currently in use and path3 is an alternative path.
	 * If we remove path1, the active path switches to path3.
	 */
2652 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
2653 	CU_ASSERT(rc == 0);
2654 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2655 	CU_ASSERT(nvme_ctrlr->resetting == true);
2656 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2657 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2658 	}
2659 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2660 
2661 	poll_threads();
2662 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2663 	poll_threads();
2664 
2665 	CU_ASSERT(nvme_ctrlr->resetting == false);
2666 
2667 	/* path3 is the current and only path. If we remove path3, the corresponding
2668 	 * nvme_ctrlr is removed.
2669 	 */
2670 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2671 	CU_ASSERT(rc == 0);
2672 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2673 
2674 	poll_threads();
2675 	spdk_delay_us(1000);
2676 	poll_threads();
2677 
2678 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2679 
2680 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2681 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2682 
2683 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2684 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2685 	CU_ASSERT(rc == 0);
2686 
2687 	spdk_delay_us(1000);
2688 	poll_threads();
2689 
2690 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2691 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2692 
2693 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2694 
2695 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2696 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2697 
2698 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2699 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2700 	CU_ASSERT(rc == 0);
2701 
2702 	spdk_delay_us(1000);
2703 	poll_threads();
2704 
2705 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2706 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2707 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2708 			break;
2709 		}
2710 	}
2711 	CU_ASSERT(ctrid != NULL);
2712 
	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2714 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2715 	CU_ASSERT(rc == 0);
2716 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2717 
2718 	poll_threads();
2719 	spdk_delay_us(1000);
2720 	poll_threads();
2721 
2722 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2723 }
2724 
2725 static void
2726 test_abort(void)
2727 {
2728 	struct spdk_nvme_transport_id trid = {};
2729 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
2730 	struct spdk_nvme_ctrlr *ctrlr;
2731 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
2732 	struct nvme_ctrlr *nvme_ctrlr;
2733 	const int STRING_SIZE = 32;
2734 	const char *attached_names[STRING_SIZE];
2735 	struct nvme_bdev *nbdev;
2736 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2737 	struct spdk_io_channel *ch1, *ch2;
2738 	struct nvme_bdev_channel *nbdev_ch1;
2739 	struct nvme_io_path *io_path1;
2740 	struct nvme_qpair *nvme_qpair1;
2741 	int rc;
2742 
	/* Create the ctrlr on thread 1 and submit the I/O and admin requests to be
	 * aborted on thread 0. Abort requests for I/O are submitted on thread 0 and
	 * abort requests for admin commands are submitted on thread 1. Both should
	 * succeed.
	 */
2747 
2748 	ut_init_trid(&trid);
2749 
2750 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2751 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2752 
2753 	g_ut_attach_ctrlr_status = 0;
2754 	g_ut_attach_bdev_count = 1;
2755 
2756 	set_thread(1);
2757 
2758 	opts.ctrlr_loss_timeout_sec = -1;
2759 	opts.reconnect_delay_sec = 1;
2760 	opts.multipath = false;
2761 
2762 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2763 				   attach_ctrlr_done, NULL, &dopts, &opts);
2764 	CU_ASSERT(rc == 0);
2765 
2766 	spdk_delay_us(1000);
2767 	poll_threads();
2768 
2769 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2770 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2771 
2772 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2773 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2774 
2775 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, nbdev, NULL);
2776 	ut_bdev_io_set_buf(write_io);
2777 
2778 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, nbdev, NULL);
2779 	ut_bdev_io_set_buf(fuse_io);
2780 
2781 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, nbdev, NULL);
2782 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2783 
2784 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, nbdev, NULL);
2785 
2786 	set_thread(0);
2787 
2788 	ch1 = spdk_get_io_channel(nbdev);
2789 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2790 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2791 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2792 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2793 	nvme_qpair1 = io_path1->qpair;
2794 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2795 
2796 	set_thread(1);
2797 
2798 	ch2 = spdk_get_io_channel(nbdev);
2799 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2800 
2801 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2802 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2803 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2804 
	/* Aborting an already completed request should fail. */
2806 	write_io->internal.f.in_submit_request = true;
2807 	bdev_nvme_submit_request(ch1, write_io);
2808 	poll_threads();
2809 
2810 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2811 
2812 	abort_io->u.abort.bio_to_abort = write_io;
2813 	abort_io->internal.f.in_submit_request = true;
2814 
2815 	bdev_nvme_submit_request(ch1, abort_io);
2816 
2817 	poll_threads();
2818 
2819 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2820 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2821 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2822 
2823 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2824 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2825 
2826 	admin_io->internal.f.in_submit_request = true;
2827 	bdev_nvme_submit_request(ch1, admin_io);
2828 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2829 	poll_threads();
2830 
2831 	CU_ASSERT(admin_io->internal.f.in_submit_request == false);
2832 
2833 	abort_io->u.abort.bio_to_abort = admin_io;
2834 	abort_io->internal.f.in_submit_request = true;
2835 
2836 	bdev_nvme_submit_request(ch2, abort_io);
2837 
2838 	poll_threads();
2839 
2840 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2841 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2842 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2843 
2844 	/* Aborting the write request should succeed. */
2845 	write_io->internal.f.in_submit_request = true;
2846 	bdev_nvme_submit_request(ch1, write_io);
2847 
2848 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
2849 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2850 
2851 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2852 	abort_io->u.abort.bio_to_abort = write_io;
2853 	abort_io->internal.f.in_submit_request = true;
2854 
2855 	bdev_nvme_submit_request(ch1, abort_io);
2856 
2857 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2858 	poll_threads();
2859 
2860 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2861 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2862 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2863 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2864 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2865 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2866 
2867 	/* Aborting the fuse request should succeed. */
2868 	fuse_io->internal.f.in_submit_request = true;
2869 	bdev_nvme_submit_request(ch1, fuse_io);
2870 
2871 	CU_ASSERT(fuse_io->internal.f.in_submit_request == true);
2872 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2873 
2874 	abort_io->u.abort.bio_to_abort = fuse_io;
2875 	abort_io->internal.f.in_submit_request = true;
2876 
2877 	bdev_nvme_submit_request(ch1, abort_io);
2878 
2879 	spdk_delay_us(10000);
2880 	poll_threads();
2881 
2882 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2883 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2884 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2885 	CU_ASSERT(fuse_io->internal.f.in_submit_request == false);
2886 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2887 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2888 
2889 	/* Aborting the admin request should succeed. */
2890 	admin_io->internal.f.in_submit_request = true;
2891 	bdev_nvme_submit_request(ch1, admin_io);
2892 
2893 	CU_ASSERT(admin_io->internal.f.in_submit_request == true);
2894 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2895 
2896 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2897 	abort_io->u.abort.bio_to_abort = admin_io;
2898 	abort_io->internal.f.in_submit_request = true;
2899 
2900 	bdev_nvme_submit_request(ch2, abort_io);
2901 
2902 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2903 	poll_threads();
2904 
2905 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2906 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2907 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2908 	CU_ASSERT(admin_io->internal.f.in_submit_request == false);
2909 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2910 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2911 
2912 	set_thread(0);
2913 
	/* If the qpair is disconnected, it is freed and then reconnected by resetting
	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is
	 * resetting should be queued.
	 */
2918 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2919 
2920 	poll_thread_times(0, 3);
2921 
2922 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2923 	CU_ASSERT(nvme_ctrlr->resetting == true);
2924 
2925 	write_io->internal.f.in_submit_request = true;
2926 
2927 	bdev_nvme_submit_request(ch1, write_io);
2928 
2929 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
2930 	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
2931 
2932 	/* Aborting the queued write request should succeed immediately. */
2933 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2934 	abort_io->u.abort.bio_to_abort = write_io;
2935 	abort_io->internal.f.in_submit_request = true;
2936 
2937 	bdev_nvme_submit_request(ch1, abort_io);
2938 
2939 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2940 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2941 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2942 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2943 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2944 
2945 	poll_threads();
2946 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2947 	poll_threads();
2948 
2949 	spdk_put_io_channel(ch1);
2950 
2951 	set_thread(1);
2952 
2953 	spdk_put_io_channel(ch2);
2954 
2955 	poll_threads();
2956 
2957 	free(write_io);
2958 	free(fuse_io);
2959 	free(admin_io);
2960 	free(abort_io);
2961 
2962 	set_thread(1);
2963 
2964 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2965 	CU_ASSERT(rc == 0);
2966 
2967 	poll_threads();
2968 	spdk_delay_us(1000);
2969 	poll_threads();
2970 
2971 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2972 }
2973 
2974 static void
2975 test_get_io_qpair(void)
2976 {
2977 	struct spdk_nvme_transport_id trid = {};
2978 	struct spdk_nvme_ctrlr ctrlr = {};
2979 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2980 	struct spdk_io_channel *ch;
2981 	struct nvme_ctrlr_channel *ctrlr_ch;
2982 	struct spdk_nvme_qpair *qpair;
2983 	int rc;
2984 
2985 	ut_init_trid(&trid);
2986 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2987 
2988 	set_thread(0);
2989 
2990 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2991 	CU_ASSERT(rc == 0);
2992 
2993 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2994 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2995 
2996 	ch = spdk_get_io_channel(nvme_ctrlr);
2997 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2998 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2999 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
3000 
3001 	qpair = bdev_nvme_get_io_qpair(ch);
3002 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
3003 
3004 	spdk_put_io_channel(ch);
3005 
3006 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3007 	CU_ASSERT(rc == 0);
3008 
3009 	poll_threads();
3010 	spdk_delay_us(1000);
3011 	poll_threads();
3012 
3013 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3014 }
3015 
/* Test a scenario where the bdev subsystem starts shutting down while NVMe bdevs
 * still exist. In this scenario, spdk_bdev_unregister() is called first. Add a
 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls
 * bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly.
 */
3021 static void
3022 test_bdev_unregister(void)
3023 {
3024 	struct spdk_nvme_transport_id trid = {};
3025 	struct spdk_nvme_ctrlr *ctrlr;
3026 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3027 	struct nvme_ctrlr *nvme_ctrlr;
3028 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3029 	const int STRING_SIZE = 32;
3030 	const char *attached_names[STRING_SIZE];
3031 	struct nvme_bdev *nbdev1, *nbdev2;
3032 	int rc;
3033 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3034 
3035 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3036 	bdev_opts.multipath = false;
3037 
3038 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3039 	ut_init_trid(&trid);
3040 
3041 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
3042 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3043 
3044 	g_ut_attach_ctrlr_status = 0;
3045 	g_ut_attach_bdev_count = 2;
3046 
3047 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3048 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3049 	CU_ASSERT(rc == 0);
3050 
3051 	spdk_delay_us(1000);
3052 	poll_threads();
3053 
3054 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3055 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3056 
3057 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
3058 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3059 
3060 	nbdev1 = nvme_ns1->bdev;
3061 	SPDK_CU_ASSERT_FATAL(nbdev1 != NULL);
3062 
3063 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
3064 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3065 
3066 	nbdev2 = nvme_ns2->bdev;
3067 	SPDK_CU_ASSERT_FATAL(nbdev2 != NULL);
3068 
3069 	bdev_nvme_destruct(&nbdev1->disk);
3070 	bdev_nvme_destruct(&nbdev2->disk);
3071 
3072 	poll_threads();
3073 
3074 	CU_ASSERT(nvme_ns1->bdev == NULL);
3075 	CU_ASSERT(nvme_ns2->bdev == NULL);
3076 
3077 	nvme_ctrlr->destruct = true;
3078 	_nvme_ctrlr_destruct(nvme_ctrlr);
3079 
3080 	poll_threads();
3081 	spdk_delay_us(1000);
3082 	poll_threads();
3083 
3084 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3085 }
3086 
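/* bdev_nvme_compare_ns() matches two namespaces by comparing their EUI64, NGUID,
 * UUID, and CSI. Exercise each identifier in isolation and then combined.
 */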
3087 static void
3088 test_compare_ns(void)
3089 {
3090 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
3091 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
3092 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
3093 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
3094 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
3095 
3096 	/* No IDs are defined. */
3097 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3098 
	/* Only EUI64s are defined and they do not match. */
3100 	nsdata1.eui64 = 0xABCDEF0123456789;
3101 	nsdata2.eui64 = 0xBBCDEF0123456789;
3102 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3103 
	/* Only EUI64s are defined and they match. */
3105 	nsdata2.eui64 = 0xABCDEF0123456789;
3106 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3107 
	/* Only NGUIDs are defined and they do not match. */
3109 	nsdata1.eui64 = 0x0;
3110 	nsdata2.eui64 = 0x0;
3111 	nsdata1.nguid[0] = 0x12;
3112 	nsdata2.nguid[0] = 0x10;
3113 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3114 
	/* Only NGUIDs are defined and they match. */
3116 	nsdata2.nguid[0] = 0x12;
3117 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3118 
	/* Only UUIDs are defined and they do not match. */
3120 	nsdata1.nguid[0] = 0x0;
3121 	nsdata2.nguid[0] = 0x0;
3122 	ns1.uuid = &uuid1;
3123 	ns2.uuid = &uuid2;
3124 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3125 
3126 	/* Only one UUID is defined. */
3127 	ns1.uuid = NULL;
3128 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3129 
	/* Only UUIDs are defined and they match. */
3131 	ns1.uuid = &uuid2;
3132 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3133 
	/* EUI64, NGUID, and UUID are all defined and they all match. */
3135 	nsdata1.eui64 = 0x123456789ABCDEF;
3136 	nsdata2.eui64 = 0x123456789ABCDEF;
3137 	nsdata1.nguid[15] = 0x34;
3138 	nsdata2.nguid[15] = 0x34;
3139 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3140 
	/* The CSIs do not match. */
3142 	ns1.csi = SPDK_NVME_CSI_ZNS;
3143 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3144 }
3145 
3146 static void
3147 test_init_ana_log_page(void)
3148 {
3149 	struct spdk_nvme_transport_id trid = {};
3150 	struct spdk_nvme_ctrlr *ctrlr;
3151 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3152 	struct nvme_ctrlr *nvme_ctrlr;
3153 	const int STRING_SIZE = 32;
3154 	const char *attached_names[STRING_SIZE];
3155 	int rc;
3156 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3157 
3158 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3159 	bdev_opts.multipath = false;
3160 
3161 	set_thread(0);
3162 
3163 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3164 	ut_init_trid(&trid);
3165 
3166 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
3167 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3168 
3169 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3170 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3171 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3172 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3173 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3174 
3175 	g_ut_attach_ctrlr_status = 0;
3176 	g_ut_attach_bdev_count = 5;
3177 
3178 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3179 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3180 	CU_ASSERT(rc == 0);
3181 
3182 	spdk_delay_us(1000);
3183 	poll_threads();
3184 
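	/* Advance the clock by the admin qpair poll period so that the ANA log page
	 * read issued during attach completes.
	 */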
3185 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3186 	poll_threads();
3187 
3188 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3189 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3190 
3191 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
3192 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
3193 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
3194 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
3195 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
3196 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3197 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3198 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3199 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
3200 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
3201 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
3202 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
3203 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
3204 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
3205 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
3206 
3207 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3208 	CU_ASSERT(rc == 0);
3209 
3210 	poll_threads();
3211 	spdk_delay_us(1000);
3212 	poll_threads();
3213 
3214 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3215 }
3216 
3217 static void
3218 init_accel(void)
3219 {
3220 	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3221 				sizeof(int), "accel_p");
3222 }
3223 
3224 static void
3225 fini_accel(void)
3226 {
3227 	spdk_io_device_unregister(g_accel_p, NULL);
3228 }
3229 
3230 static void
3231 test_get_memory_domains(void)
3232 {
3233 	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3234 	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3235 	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3236 	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3237 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3238 	struct spdk_memory_domain *domains[4] = {};
3239 	int rc = 0;
3240 
3241 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3242 
3243 	/* nvme controller doesn't have memory domains */
3244 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3245 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3246 	CU_ASSERT(rc == 0);
3247 	CU_ASSERT(domains[0] == NULL);
3248 	CU_ASSERT(domains[1] == NULL);
3249 
3250 	/* nvme controller has a memory domain */
3251 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3252 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3253 	CU_ASSERT(rc == 1);
3254 	CU_ASSERT(domains[0] != NULL);
3255 	memset(domains, 0, sizeof(domains));
3256 
3257 	/* multipath, 2 controllers report 1 memory domain each */
3258 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3259 
3260 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3261 	CU_ASSERT(rc == 2);
3262 	CU_ASSERT(domains[0] != NULL);
3263 	CU_ASSERT(domains[1] != NULL);
3264 	memset(domains, 0, sizeof(domains));
3265 
3266 	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3267 	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3268 	CU_ASSERT(rc == 2);
3269 
3270 	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3271 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3272 	CU_ASSERT(rc == 2);
3273 	CU_ASSERT(domains[0] == NULL);
3274 	CU_ASSERT(domains[1] == NULL);
3275 
3276 	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3277 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3278 	CU_ASSERT(rc == 2);
3279 	CU_ASSERT(domains[0] != NULL);
3280 	CU_ASSERT(domains[1] == NULL);
3281 	memset(domains, 0, sizeof(domains));
3282 
	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3284 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3285 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3286 	CU_ASSERT(rc == 4);
3287 	CU_ASSERT(domains[0] != NULL);
3288 	CU_ASSERT(domains[1] != NULL);
3289 	CU_ASSERT(domains[2] != NULL);
3290 	CU_ASSERT(domains[3] != NULL);
3291 	memset(domains, 0, sizeof(domains));
3292 
	/* multipath, 2 controllers report 2 memory domains each (not possible, just
	 * for test). The array size is less than the number of memory domains. */
3295 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3296 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3297 	CU_ASSERT(rc == 4);
3298 	CU_ASSERT(domains[0] != NULL);
3299 	CU_ASSERT(domains[1] != NULL);
3300 	CU_ASSERT(domains[2] != NULL);
3301 	CU_ASSERT(domains[3] == NULL);
3302 	memset(domains, 0, sizeof(domains));
3303 
3304 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3305 }
3306 
3307 static void
3308 test_reconnect_qpair(void)
3309 {
3310 	struct spdk_nvme_transport_id trid = {};
3311 	struct spdk_nvme_ctrlr *ctrlr;
3312 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3313 	struct nvme_ctrlr *nvme_ctrlr;
3314 	const int STRING_SIZE = 32;
3315 	const char *attached_names[STRING_SIZE];
3316 	struct nvme_bdev *nbdev;
3317 	struct spdk_io_channel *ch1, *ch2;
3318 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3319 	struct nvme_io_path *io_path1, *io_path2;
3320 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3321 	int rc;
3322 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3323 
3324 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3325 	bdev_opts.multipath = false;
3326 
3327 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3328 	ut_init_trid(&trid);
3329 
3330 	set_thread(0);
3331 
3332 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3333 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3334 
3335 	g_ut_attach_ctrlr_status = 0;
3336 	g_ut_attach_bdev_count = 1;
3337 
3338 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3339 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3340 	CU_ASSERT(rc == 0);
3341 
3342 	spdk_delay_us(1000);
3343 	poll_threads();
3344 
3345 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3346 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3347 
3348 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3349 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
3350 
3351 	ch1 = spdk_get_io_channel(nbdev);
3352 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3353 
3354 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3355 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3356 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3357 	nvme_qpair1 = io_path1->qpair;
3358 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3359 
3360 	set_thread(1);
3361 
3362 	ch2 = spdk_get_io_channel(nbdev);
3363 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3364 
3365 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3366 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3367 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3368 	nvme_qpair2 = io_path2->qpair;
3369 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3370 
	/* If a qpair is disconnected, it is freed and then reconnected by resetting
	 * the corresponding nvme_ctrlr.
	 */
3374 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3375 	ctrlr->is_failed = true;
3376 
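	/* Step through the reset: the disconnected qpair is detected on thread 1
	 * first, then both qpairs are deleted, the ctrlr is disconnected and
	 * reconnected, and finally both qpairs are recreated.
	 */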
3377 	poll_thread_times(1, 3);
3378 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3379 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3380 	CU_ASSERT(nvme_ctrlr->resetting == true);
3381 
3382 	poll_thread_times(0, 3);
3383 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3384 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3385 	CU_ASSERT(ctrlr->is_failed == true);
3386 
3387 	poll_thread_times(1, 2);
3388 	poll_thread_times(0, 1);
3389 	CU_ASSERT(ctrlr->is_failed == false);
3390 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3391 
3392 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3393 	poll_thread_times(0, 2);
3394 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3395 
3396 	poll_thread_times(0, 1);
3397 	poll_thread_times(1, 1);
3398 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3399 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3400 	CU_ASSERT(nvme_ctrlr->resetting == true);
3401 
3402 	poll_thread_times(0, 2);
3403 	poll_thread_times(1, 1);
3404 	poll_thread_times(0, 1);
3405 	CU_ASSERT(nvme_ctrlr->resetting == false);
3406 
3407 	poll_threads();
3408 
3409 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3410 	 * fails, the qpair is just freed.
3411 	 */
3412 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3413 	ctrlr->is_failed = true;
3414 	ctrlr->fail_reset = true;
3415 
3416 	poll_thread_times(1, 3);
3417 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3418 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3419 	CU_ASSERT(nvme_ctrlr->resetting == true);
3420 
3421 	poll_thread_times(0, 3);
3422 	poll_thread_times(1, 1);
3423 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3424 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3425 	CU_ASSERT(ctrlr->is_failed == true);
3426 
3427 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3428 	poll_thread_times(0, 3);
3429 	poll_thread_times(1, 1);
3430 	poll_thread_times(0, 1);
3431 	CU_ASSERT(ctrlr->is_failed == true);
3432 	CU_ASSERT(nvme_ctrlr->resetting == false);
3433 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3434 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3435 
3436 	poll_threads();
3437 
3438 	spdk_put_io_channel(ch2);
3439 
3440 	set_thread(0);
3441 
3442 	spdk_put_io_channel(ch1);
3443 
3444 	poll_threads();
3445 
3446 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3447 	CU_ASSERT(rc == 0);
3448 
3449 	poll_threads();
3450 	spdk_delay_us(1000);
3451 	poll_threads();
3452 
3453 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3454 }
3455 
3456 static void
3457 test_create_bdev_ctrlr(void)
3458 {
3459 	struct nvme_path_id path1 = {}, path2 = {};
3460 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3461 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3462 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3463 	const int STRING_SIZE = 32;
3464 	const char *attached_names[STRING_SIZE];
3465 	int rc;
3466 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3467 
3468 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3469 	bdev_opts.multipath = true;
3470 
3471 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3472 	ut_init_trid(&path1.trid);
3473 	ut_init_trid2(&path2.trid);
3474 
3475 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3476 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3477 
3478 	g_ut_attach_ctrlr_status = 0;
3479 	g_ut_attach_bdev_count = 0;
3480 
3481 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3482 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3483 	CU_ASSERT(rc == 0);
3484 
3485 	spdk_delay_us(1000);
3486 	poll_threads();
3487 
3488 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3489 	poll_threads();
3490 
3491 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3492 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3493 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3494 
3495 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
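	/* Controllers within one NVM subsystem must have unique cntlids. A duplicate
	 * cntlid suggests that the same controller was attached twice rather than a
	 * new path being added, so bdev_nvme is expected to reject it.
	 */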
3496 	g_ut_attach_ctrlr_status = -EINVAL;
3497 
3498 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3499 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3500 
3501 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3502 
3503 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3504 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3505 	CU_ASSERT(rc == 0);
3506 
3507 	spdk_delay_us(1000);
3508 	poll_threads();
3509 
3510 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3511 	poll_threads();
3512 
3513 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3514 
3515 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3516 	g_ut_attach_ctrlr_status = 0;
3517 
3518 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3519 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3520 
3521 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3522 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3523 	CU_ASSERT(rc == 0);
3524 
3525 	spdk_delay_us(1000);
3526 	poll_threads();
3527 
3528 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3529 	poll_threads();
3530 
3531 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3532 
3533 	/* Delete two ctrlrs at once. */
3534 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3535 	CU_ASSERT(rc == 0);
3536 
3537 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3538 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3539 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3540 
3541 	poll_threads();
3542 	spdk_delay_us(1000);
3543 	poll_threads();
3544 
3545 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3546 
3547 	/* Add two ctrlrs and delete them one by one. */
3548 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3549 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3550 
3551 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3552 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3553 
3554 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3555 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3556 	CU_ASSERT(rc == 0);
3557 
3558 	spdk_delay_us(1000);
3559 	poll_threads();
3560 
3561 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3562 	poll_threads();
3563 
3564 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3565 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3566 	CU_ASSERT(rc == 0);
3567 
3568 	spdk_delay_us(1000);
3569 	poll_threads();
3570 
3571 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3572 	poll_threads();
3573 
3574 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3575 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3576 
3577 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3578 	CU_ASSERT(rc == 0);
3579 
3580 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3581 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3582 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3583 
3584 	poll_threads();
3585 	spdk_delay_us(1000);
3586 	poll_threads();
3587 
3588 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3589 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3590 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3591 
3592 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3593 	CU_ASSERT(rc == 0);
3594 
3595 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3596 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3597 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3598 
3599 	poll_threads();
3600 	spdk_delay_us(1000);
3601 	poll_threads();
3602 
3603 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3604 }
3605 
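/* Return the nvme_ns of @nbdev that is backed by @nvme_ctrlr, or NULL if
 * @nvme_ctrlr does not provide a namespace to @nbdev.
 */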
3606 static struct nvme_ns *
3607 _nvme_bdev_get_ns(struct nvme_bdev *nbdev, struct nvme_ctrlr *nvme_ctrlr)
3608 {
3609 	struct nvme_ns *nvme_ns;
3610 
3611 	TAILQ_FOREACH(nvme_ns, &nbdev->nvme_ns_list, tailq) {
3612 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3613 			return nvme_ns;
3614 		}
3615 	}
3616 
3617 	return NULL;
3618 }
3619 
3620 static void
3621 test_add_multi_ns_to_bdev(void)
3622 {
3623 	struct nvme_path_id path1 = {}, path2 = {};
3624 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3625 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3626 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3627 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3628 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3629 	struct nvme_bdev *nbdev1, *nbdev2, *nbdev3, *nbdev4;
3630 	const int STRING_SIZE = 32;
3631 	const char *attached_names[STRING_SIZE];
3632 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3633 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3634 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3635 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3636 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3637 	int rc;
3638 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3639 
3640 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3641 	bdev_opts.multipath = true;
3642 
3643 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3644 	ut_init_trid(&path1.trid);
3645 	ut_init_trid2(&path2.trid);
3646 
3647 	/* Create nvme_bdevs, some of which share namespaces between the two ctrlrs. */
3648 
3649 	/* Attach the 1st ctrlr, which supports a maximum of 5 namespaces. Its 1st,
3650 	 * 3rd, and 4th namespaces are populated.
3651 	 */
3652 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3653 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3654 
3655 	ctrlr1->ns[1].is_active = false;
3656 	ctrlr1->ns[4].is_active = false;
3657 	ctrlr1->ns[0].uuid = &uuid1;
3658 	ctrlr1->ns[2].uuid = &uuid3;
3659 	ctrlr1->ns[3].uuid = &uuid4;
3660 
3661 	g_ut_attach_ctrlr_status = 0;
3662 	g_ut_attach_bdev_count = 3;
3663 
3664 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32,
3665 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3666 	CU_ASSERT(rc == 0);
3667 
3668 	spdk_delay_us(1000);
3669 	poll_threads();
3670 
3671 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3672 	poll_threads();
3673 
3674 	/* Attach the 2nd ctrlr, which also supports a maximum of 5 namespaces. Its 1st,
3675 	 * 2nd, and 4th namespaces are populated. The UUID of its 4th namespace differs,
3676 	 * and hence adding the 4th namespace to the shared bdev should fail.
3677 	 */
3678 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3679 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3680 
3681 	ctrlr2->ns[2].is_active = false;
3682 	ctrlr2->ns[4].is_active = false;
3683 	ctrlr2->ns[0].uuid = &uuid1;
3684 	ctrlr2->ns[1].uuid = &uuid2;
3685 	ctrlr2->ns[3].uuid = &uuid44;
3686 
3687 	g_ut_attach_ctrlr_status = 0;
3688 	g_ut_attach_bdev_count = 2;
3689 
3690 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32,
3691 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3692 	CU_ASSERT(rc == 0);
3693 
3694 	spdk_delay_us(1000);
3695 	poll_threads();
3696 
3697 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3698 	poll_threads();
3699 
3700 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3701 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3702 
3703 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3704 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3705 
3706 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3707 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3708 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3709 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3710 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3711 
3712 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3713 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3714 
3715 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3716 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3717 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3718 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3719 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3720 
3721 	nbdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3722 	SPDK_CU_ASSERT_FATAL(nbdev1 != NULL);
3723 	nbdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3724 	SPDK_CU_ASSERT_FATAL(nbdev2 != NULL);
3725 	nbdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3726 	SPDK_CU_ASSERT_FATAL(nbdev3 != NULL);
3727 	nbdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3728 	SPDK_CU_ASSERT_FATAL(nbdev4 != NULL);
3729 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3730 
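	/* nbdev1 aggregates the namespace with uuid1 from both ctrlrs and hence holds
	 * two references. The other nvme_bdevs are backed by a single ctrlr each.
	 */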
3731 	CU_ASSERT(nbdev1->ref == 2);
3732 	CU_ASSERT(nbdev2->ref == 1);
3733 	CU_ASSERT(nbdev3->ref == 1);
3734 	CU_ASSERT(nbdev4->ref == 1);
3735 
3736 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3737 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3738 	CU_ASSERT(rc == 0);
3739 
3740 	poll_threads();
3741 	spdk_delay_us(1000);
3742 	poll_threads();
3743 
3744 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3745 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3746 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2);
3747 
3748 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3749 	CU_ASSERT(rc == 0);
3750 
3751 	poll_threads();
3752 	spdk_delay_us(1000);
3753 	poll_threads();
3754 
3755 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3756 
3757 	/* Test if an nvme_bdev which has a namespace shared between two ctrlrs
3758 	 * can be deleted when the bdev subsystem shuts down.
3759 	 */
3760 	g_ut_attach_bdev_count = 1;
3761 
3762 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3763 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3764 
3765 	ctrlr1->ns[0].uuid = &uuid1;
3766 
3767 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32,
3768 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3769 	CU_ASSERT(rc == 0);
3770 
3771 	spdk_delay_us(1000);
3772 	poll_threads();
3773 
3774 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3775 	poll_threads();
3776 
3777 	ut_init_trid2(&path2.trid);
3778 
3779 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3780 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3781 
3782 	ctrlr2->ns[0].uuid = &uuid1;
3783 
3784 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32,
3785 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3786 	CU_ASSERT(rc == 0);
3787 
3788 	spdk_delay_us(1000);
3789 	poll_threads();
3790 
3791 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3792 	poll_threads();
3793 
3794 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3795 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3796 
3797 	nbdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3798 	SPDK_CU_ASSERT_FATAL(nbdev1 != NULL);
3799 
3800 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3801 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3802 
3803 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3804 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3805 
3806 	/* Check that the nvme_bdev has two nvme_ns, one per ctrlr. */
3807 	nvme_ns1 = _nvme_bdev_get_ns(nbdev1, nvme_ctrlr1);
3808 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3809 	CU_ASSERT(nvme_ns1->bdev == nbdev1);
3810 
3811 	nvme_ns2 = _nvme_bdev_get_ns(nbdev1, nvme_ctrlr2);
3812 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3813 	CU_ASSERT(nvme_ns2->bdev == nbdev1);
3814 
3815 	/* Delete the nvme_bdev first, as is done when the bdev subsystem shuts down. */
3816 	bdev_nvme_destruct(&nbdev1->disk);
3817 
3818 	poll_threads();
3819 
3820 	CU_ASSERT(nvme_ns1->bdev == NULL);
3821 	CU_ASSERT(nvme_ns2->bdev == NULL);
3822 
3823 	nvme_ctrlr1->destruct = true;
3824 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3825 
3826 	poll_threads();
3827 	spdk_delay_us(1000);
3828 	poll_threads();
3829 
3830 	nvme_ctrlr2->destruct = true;
3831 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3832 
3833 	poll_threads();
3834 	spdk_delay_us(1000);
3835 	poll_threads();
3836 
3837 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3838 }
3839 
3840 static void
3841 test_add_multi_io_paths_to_nbdev_ch(void)
3842 {
3843 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3844 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3845 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3846 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3847 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3848 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3849 	const int STRING_SIZE = 32;
3850 	const char *attached_names[STRING_SIZE];
3851 	struct nvme_bdev *nbdev;
3852 	struct spdk_io_channel *ch;
3853 	struct nvme_bdev_channel *nbdev_ch;
3854 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3855 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3856 	int rc;
3857 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3858 
3859 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3860 	bdev_opts.multipath = true;
3861 
3862 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3863 	ut_init_trid(&path1.trid);
3864 	ut_init_trid2(&path2.trid);
3865 	ut_init_trid3(&path3.trid);
3866 	g_ut_attach_ctrlr_status = 0;
3867 	g_ut_attach_bdev_count = 1;
3868 
3869 	set_thread(1);
3870 
3871 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3872 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3873 
3874 	ctrlr1->ns[0].uuid = &uuid1;
3875 
3876 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3877 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3878 	CU_ASSERT(rc == 0);
3879 
3880 	spdk_delay_us(1000);
3881 	poll_threads();
3882 
3883 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3884 	poll_threads();
3885 
3886 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3887 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3888 
3889 	ctrlr2->ns[0].uuid = &uuid1;
3890 
3891 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3892 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3893 	CU_ASSERT(rc == 0);
3894 
3895 	spdk_delay_us(1000);
3896 	poll_threads();
3897 
3898 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3899 	poll_threads();
3900 
3901 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3902 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3903 
3904 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3905 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3906 
3907 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3908 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3909 
3910 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3911 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
3912 
3913 	nvme_ns1 = _nvme_bdev_get_ns(nbdev, nvme_ctrlr1);
3914 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3915 
3916 	nvme_ns2 = _nvme_bdev_get_ns(nbdev, nvme_ctrlr2);
3917 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3918 
3919 	set_thread(0);
3920 
3921 	ch = spdk_get_io_channel(nbdev);
3922 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3923 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3924 
3925 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3926 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3927 
3928 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3929 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3930 
3931 	set_thread(1);
3932 
3933 	/* Check that an I/O path is dynamically added to the existing nvme_bdev_channel. */
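	/* Presumably, the namespace-add path iterates the existing I/O channels of
	 * the nvme_bdev and appends a new nvme_io_path to each nvme_bdev_channel.
	 */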
3934 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3935 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3936 
3937 	ctrlr3->ns[0].uuid = &uuid1;
3938 
3939 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3940 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3941 	CU_ASSERT(rc == 0);
3942 
3943 	spdk_delay_us(1000);
3944 	poll_threads();
3945 
3946 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3947 	poll_threads();
3948 
3949 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn);
3950 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3951 
3952 	nvme_ns3 = _nvme_bdev_get_ns(nbdev, nvme_ctrlr3);
3953 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3954 
3955 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3956 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3957 
3958 	/* Check that the I/O path is dynamically deleted from the nvme_bdev_channel. */
3959 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3960 	CU_ASSERT(rc == 0);
3961 
3962 	poll_threads();
3963 	spdk_delay_us(1000);
3964 	poll_threads();
3965 
3966 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1);
3967 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3968 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3);
3969 
3970 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3971 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3972 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3973 
3974 	set_thread(0);
3975 
3976 	spdk_put_io_channel(ch);
3977 
3978 	poll_threads();
3979 
3980 	set_thread(1);
3981 
3982 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3983 	CU_ASSERT(rc == 0);
3984 
3985 	poll_threads();
3986 	spdk_delay_us(1000);
3987 	poll_threads();
3988 
3989 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3990 }
3991 
3992 static void
3993 test_admin_path(void)
3994 {
3995 	struct nvme_path_id path1 = {}, path2 = {};
3996 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3997 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3998 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3999 	const int STRING_SIZE = 32;
4000 	const char *attached_names[STRING_SIZE];
4001 	struct nvme_bdev *nbdev;
4002 	struct spdk_io_channel *ch;
4003 	struct spdk_bdev_io *bdev_io;
4004 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4005 	int rc;
4006 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4007 
4008 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4009 	bdev_opts.multipath = true;
4010 
4011 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4012 	ut_init_trid(&path1.trid);
4013 	ut_init_trid2(&path2.trid);
4014 	g_ut_attach_ctrlr_status = 0;
4015 	g_ut_attach_bdev_count = 1;
4016 
4017 	set_thread(0);
4018 
4019 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4020 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4021 
4022 	ctrlr1->ns[0].uuid = &uuid1;
4023 
4024 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4025 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4026 	CU_ASSERT(rc == 0);
4027 
4028 	spdk_delay_us(1000);
4029 	poll_threads();
4030 
4031 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4032 	poll_threads();
4033 
4034 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4035 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4036 
4037 	ctrlr2->ns[0].uuid = &uuid1;
4038 
4039 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4040 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4041 	CU_ASSERT(rc == 0);
4042 
4043 	spdk_delay_us(1000);
4044 	poll_threads();
4045 
4046 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4047 	poll_threads();
4048 
4049 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4050 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4051 
4052 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4053 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
4054 
4055 	ch = spdk_get_io_channel(nbdev);
4056 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4057 
4058 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, nbdev, ch);
4059 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
4060 
4061 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
4062 	 * submitted to ctrlr2.
4063 	 */
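	/* Admin passthrough presumably walks the nvme_ctrlr list of the
	 * nvme_bdev_ctrlr and submits to the first ctrlr that is available.
	 */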
4064 	ctrlr1->is_failed = true;
4065 	bdev_io->internal.f.in_submit_request = true;
4066 
4067 	bdev_nvme_submit_request(ch, bdev_io);
4068 
4069 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4070 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
4071 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4072 
4073 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4074 	poll_threads();
4075 
4076 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4077 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4078 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4079 
4080 	/* Both ctrlr1 and ctrlr2 are failed. Submitting the admin command should fail. */
4081 	ctrlr2->is_failed = true;
4082 	bdev_io->internal.f.in_submit_request = true;
4083 
4084 	bdev_nvme_submit_request(ch, bdev_io);
4085 
4086 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4087 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4088 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4089 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4090 
4091 	free(bdev_io);
4092 
4093 	spdk_put_io_channel(ch);
4094 
4095 	poll_threads();
4096 
4097 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4098 	CU_ASSERT(rc == 0);
4099 
4100 	poll_threads();
4101 	spdk_delay_us(1000);
4102 	poll_threads();
4103 
4104 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4105 }
4106 
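/* Return the nvme_io_path in @nbdev_ch whose qpair belongs to @nvme_ctrlr,
 * or NULL if no such io_path exists.
 */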
4107 static struct nvme_io_path *
4108 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
4109 			struct nvme_ctrlr *nvme_ctrlr)
4110 {
4111 	struct nvme_io_path *io_path;
4112 
4113 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
4114 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
4115 			return io_path;
4116 		}
4117 	}
4118 
4119 	return NULL;
4120 }
4121 
4122 static void
4123 test_reset_bdev_ctrlr(void)
4124 {
4125 	struct nvme_path_id path1 = {}, path2 = {};
4126 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4127 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4128 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4129 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4130 	struct nvme_path_id *curr_path1, *curr_path2;
4131 	const int STRING_SIZE = 32;
4132 	const char *attached_names[STRING_SIZE];
4133 	struct nvme_bdev *nbdev;
4134 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
4135 	struct nvme_bdev_io *first_bio;
4136 	struct spdk_io_channel *ch1, *ch2;
4137 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
4138 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
4139 	int rc;
4140 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4141 
4142 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4143 	bdev_opts.multipath = true;
4144 
4145 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4146 	ut_init_trid(&path1.trid);
4147 	ut_init_trid2(&path2.trid);
4148 	g_ut_attach_ctrlr_status = 0;
4149 	g_ut_attach_bdev_count = 1;
4150 
4151 	set_thread(0);
4152 
4153 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4154 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4155 
4156 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4157 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4158 	CU_ASSERT(rc == 0);
4159 
4160 	spdk_delay_us(1000);
4161 	poll_threads();
4162 
4163 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4164 	poll_threads();
4165 
4166 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4167 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4168 
4169 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4170 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4171 	CU_ASSERT(rc == 0);
4172 
4173 	spdk_delay_us(1000);
4174 	poll_threads();
4175 
4176 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4177 	poll_threads();
4178 
4179 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4180 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4181 
4182 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4183 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
4184 
4185 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
4186 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
4187 
4188 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4189 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
4190 
4191 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
4192 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
4193 
4194 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4195 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
4196 
4197 	set_thread(0);
4198 
4199 	ch1 = spdk_get_io_channel(nbdev);
4200 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
4201 
4202 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
4203 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
4204 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
4205 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
4206 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
4207 
4208 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, nbdev, ch1);
4209 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
4210 
4211 	set_thread(1);
4212 
4213 	ch2 = spdk_get_io_channel(nbdev);
4214 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
4215 
4216 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
4217 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
4218 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
4219 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
4220 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
4221 
4222 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, nbdev, ch2);
4223 
4224 	/* The first reset request from bdev_io is submitted on thread 0.
4225 	 * Check that ctrlr1 is reset first and ctrlr2 is reset after it.
4226 	 *
4227 	 * A few extra polls are necessary after resetting ctrlr1 in order to
4228 	 * check the pending reset requests for ctrlr1.
4229 	 */
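	/* A reset bdev_io presumably visits the ctrlrs one by one, tracking its
	 * current position in first_bio->io_path; the assertions below rely on that.
	 */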
4230 	ctrlr1->is_failed = true;
4231 	curr_path1->last_failed_tsc = spdk_get_ticks();
4232 	ctrlr2->is_failed = true;
4233 	curr_path2->last_failed_tsc = spdk_get_ticks();
4234 
4235 	set_thread(0);
4236 
4237 	bdev_nvme_submit_request(ch1, first_bdev_io);
4238 
4239 	poll_thread_times(0, 1);
4240 	poll_thread_times(1, 1);
4241 	poll_thread_times(0, 2);
4242 	poll_thread_times(1, 1);
4243 	poll_thread_times(0, 1);
4244 
4245 	CU_ASSERT(first_bio->io_path == io_path11);
4246 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4247 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4248 
4249 	poll_thread_times(0, 3);
4250 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4251 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4252 
4253 	poll_thread_times(1, 2);
4254 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4255 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4256 	CU_ASSERT(ctrlr1->is_failed == true);
4257 
4258 	poll_thread_times(0, 1);
4259 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4260 	CU_ASSERT(ctrlr1->is_failed == false);
4261 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4262 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4263 
4264 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4265 	poll_thread_times(0, 2);
4266 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4267 
4268 	poll_thread_times(0, 1);
4269 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4270 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4271 
4272 	poll_thread_times(1, 1);
4273 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4274 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4275 
4276 	poll_thread_times(0, 1);
4277 	CU_ASSERT(nvme_ctrlr1->resetting == false);
4278 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4279 	poll_thread_times(0, 1);
4280 	CU_ASSERT(first_bio->io_path == io_path12);
4281 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4282 
4283 	poll_thread_times(0, 2);
4284 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4285 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4286 
4287 	poll_thread_times(1, 2);
4288 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4289 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4290 	CU_ASSERT(ctrlr2->is_failed == true);
4291 
4292 	poll_thread_times(0, 1);
4293 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4294 	CU_ASSERT(ctrlr2->is_failed == false);
4295 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4296 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4297 
4298 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4299 	poll_thread_times(0, 2);
4300 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4301 
4302 	poll_thread_times(0, 1);
4303 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4304 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4305 
4306 	poll_thread_times(1, 2);
4307 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4308 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4309 
4310 	poll_thread_times(0, 1);
4311 	CU_ASSERT(nvme_ctrlr2->resetting == false);
4312 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4313 
4314 	poll_thread_times(0, 1);
4315 	CU_ASSERT(first_bio->io_path == NULL);
4316 
4317 	poll_threads();
4318 
4319 	/* There is a race between two reset requests from bdev_io.
4320 	 *
4321 	 * The first reset request is submitted on thread 0, and the second reset
4322 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4323 	 * The second is queued as pending on ctrlr1. After the first completes
4324 	 * resetting ctrlr1, both reset requests move on to ctrlr2, the first arriving
4325 	 * earlier than the second. The second is queued as pending on ctrlr2 again.
4326 	 * After the first completes resetting ctrlr2, both complete successfully.
4327 	 */
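	/* A rough sketch of the assumed queueing logic inside bdev_nvme (not the
	 * exact implementation):
	 *
	 *	if (nvme_ctrlr->resetting) {
	 *		TAILQ_INSERT_TAIL(&nvme_ctrlr->pending_resets, bio, ...);
	 *	} else {
	 *		start the reset and kick pending resets on completion;
	 *	}
	 */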
4328 	ctrlr1->is_failed = true;
4329 	curr_path1->last_failed_tsc = spdk_get_ticks();
4330 	ctrlr2->is_failed = true;
4331 	curr_path2->last_failed_tsc = spdk_get_ticks();
4332 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4333 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4334 
4335 	set_thread(0);
4336 
4337 	bdev_nvme_submit_request(ch1, first_bdev_io);
4338 
4339 	set_thread(1);
4340 
4341 	bdev_nvme_submit_request(ch2, second_bdev_io);
4342 
4343 	poll_thread_times(0, 1);
4344 	poll_thread_times(1, 1);
4345 	poll_thread_times(0, 2);
4346 	poll_thread_times(1, 1);
4347 	poll_thread_times(0, 1);
4348 	poll_thread_times(1, 1);
4349 
4350 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4351 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4352 	CU_ASSERT(TAILQ_FIRST(&nvme_ctrlr1->pending_resets) ==
4353 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4354 
4355 	poll_threads();
4356 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4357 	poll_threads();
4358 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4359 	poll_threads();
4360 
4361 	CU_ASSERT(ctrlr1->is_failed == false);
4362 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4363 	CU_ASSERT(ctrlr2->is_failed == false);
4364 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4365 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4366 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4367 
4368 	/* Reset of the first path succeeds and reset of the second path fails.
4369 	 * Since we have at least one working path, the reset I/O should not fail.
4370 	 */
4371 	ctrlr1->is_failed = true;
4372 	curr_path1->last_failed_tsc = spdk_get_ticks();
4373 	ctrlr2->is_failed = true;
4374 	curr_path2->last_failed_tsc = spdk_get_ticks();
4375 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4376 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4377 
4378 	set_thread(0);
4379 	bdev_nvme_submit_request(ch1, first_bdev_io);
4380 
4381 	set_thread(1);
4382 	bdev_nvme_submit_request(ch2, second_bdev_io);
4383 
4384 	poll_thread_times(0, 1);
4385 	poll_thread_times(1, 1);
4386 	poll_thread_times(0, 2);
4387 	poll_thread_times(1, 1);
4388 	poll_thread_times(0, 1);
4389 	poll_thread_times(1, 1);
4390 
4391 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4392 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4393 	CU_ASSERT(TAILQ_FIRST(&nvme_ctrlr1->pending_resets) ==
4394 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4395 
4396 	ctrlr2->fail_reset = true;
4397 
4398 	poll_threads();
4399 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4400 	poll_threads();
4401 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4402 	poll_threads();
4403 
4404 	CU_ASSERT(ctrlr1->is_failed == false);
4405 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4406 	CU_ASSERT(ctrlr2->is_failed == true);
4407 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4408 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4409 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4410 
4411 	/* Path 2 recovers. */
4412 	ctrlr2->fail_reset = false;
4413 	poll_threads();
4414 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4415 	poll_threads();
4416 
4417 	CU_ASSERT(ctrlr2->is_failed == false);
4418 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4419 
4420 	/* Reset of the first path fails and reset of the second path succeeds.
4421 	 * Since we have at least one working path, the reset I/O should not fail.
4422 	 */
4423 	ctrlr1->is_failed = true;
4424 	curr_path1->last_failed_tsc = spdk_get_ticks();
4425 	ctrlr2->is_failed = true;
4426 	curr_path2->last_failed_tsc = spdk_get_ticks();
4427 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4428 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4429 
4430 	set_thread(0);
4431 	bdev_nvme_submit_request(ch1, first_bdev_io);
4432 
4433 	set_thread(1);
4434 	bdev_nvme_submit_request(ch2, second_bdev_io);
4435 
4436 	poll_thread_times(0, 1);
4437 	poll_thread_times(1, 1);
4438 	poll_thread_times(0, 2);
4439 	poll_thread_times(1, 1);
4440 	poll_thread_times(0, 1);
4441 	poll_thread_times(1, 1);
4442 
4443 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4444 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4445 	CU_ASSERT(TAILQ_FIRST(&nvme_ctrlr1->pending_resets) ==
4446 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4447 
4448 	ctrlr1->fail_reset = true;
4449 
4450 	poll_threads();
4451 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4452 	poll_threads();
4453 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4454 	poll_threads();
4455 
4456 	CU_ASSERT(ctrlr1->is_failed == true);
4457 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4458 	CU_ASSERT(ctrlr2->is_failed == false);
4459 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4460 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4461 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4462 
4463 	/* Path 1 recovers. */
4464 	ctrlr1->fail_reset = false;
4465 	poll_threads();
4466 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4467 	poll_threads();
4468 
4469 	CU_ASSERT(ctrlr1->is_failed == false);
4470 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4471 
4472 	/* Resets of both paths fail.
4473 	 * Since we have no working path, the reset I/O should fail.
4474 	 */
4475 	ctrlr1->is_failed = true;
4476 	curr_path1->last_failed_tsc = spdk_get_ticks();
4477 	ctrlr2->is_failed = true;
4478 	curr_path2->last_failed_tsc = spdk_get_ticks();
4479 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4480 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4481 
4482 	set_thread(0);
4483 	bdev_nvme_submit_request(ch1, first_bdev_io);
4484 
4485 	set_thread(1);
4486 	bdev_nvme_submit_request(ch2, second_bdev_io);
4487 
4488 	poll_thread_times(0, 1);
4489 	poll_thread_times(1, 1);
4490 	poll_thread_times(0, 2);
4491 	poll_thread_times(1, 1);
4492 	poll_thread_times(0, 1);
4493 	poll_thread_times(1, 1);
4494 
4495 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4496 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4497 	CU_ASSERT(TAILQ_FIRST(&nvme_ctrlr1->pending_resets) ==
4498 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4499 
4500 	ctrlr1->fail_reset = true;
4501 	ctrlr2->fail_reset = true;
4502 
4503 	poll_threads();
4504 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4505 	poll_threads();
4506 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4507 	poll_threads();
4508 
4509 	CU_ASSERT(ctrlr1->is_failed == true);
4510 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4511 	CU_ASSERT(ctrlr2->is_failed == true);
4512 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4513 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4514 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4515 
4516 	/* Paths 1 and 2 recover. */
4517 	ctrlr1->fail_reset = false;
4518 	ctrlr2->fail_reset = false;
4519 	poll_threads();
4520 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4521 	poll_threads();
4522 
4523 	CU_ASSERT(ctrlr1->is_failed == false);
4524 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4525 	CU_ASSERT(ctrlr2->is_failed == false);
4526 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4527 
4528 	/* Reset of the first path fails and reset of the second path succeeds.
4529 	 * Since we have at least one working path, the reset I/O should not fail.
4530 	 *
4531 	 * Here, reset of the first path fails immediately because the path is disabled.
4532 	 *
4533 	 * The purpose is to verify the fix for a bug where the bdev_io did not hold
4534 	 * the io_path when its reset failed immediately, and the subsequent operation
4535 	 * then caused a NULL pointer access.
4536 	 */
4537 	nvme_ctrlr1->disabled = true;
4538 	ctrlr1->is_failed = true;
4539 	curr_path1->last_failed_tsc = spdk_get_ticks();
4540 	ctrlr2->is_failed = true;
4541 	curr_path2->last_failed_tsc = spdk_get_ticks();
4542 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4543 
4544 	set_thread(0);
4545 	bdev_nvme_submit_request(ch1, first_bdev_io);
4546 
4547 	poll_threads();
4548 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4549 	poll_threads();
4550 
4551 	CU_ASSERT(ctrlr1->is_failed == true);
4552 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4553 	CU_ASSERT(ctrlr2->is_failed == false);
4554 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4555 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4556 
4557 	nvme_ctrlr1->disabled = false;
4558 	ctrlr1->is_failed = false;
4559 	curr_path1->last_failed_tsc = 0;
4560 
4561 	set_thread(0);
4562 
4563 	spdk_put_io_channel(ch1);
4564 
4565 	set_thread(1);
4566 
4567 	spdk_put_io_channel(ch2);
4568 
4569 	poll_threads();
4570 
4571 	set_thread(0);
4572 
4573 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4574 	CU_ASSERT(rc == 0);
4575 
4576 	poll_threads();
4577 	spdk_delay_us(1000);
4578 	poll_threads();
4579 
4580 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4581 
4582 	free(first_bdev_io);
4583 	free(second_bdev_io);
4584 }
4585 
4586 static void
4587 test_find_io_path(void)
4588 {
4589 	struct nvme_bdev_channel nbdev_ch = {
4590 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4591 	};
4592 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4593 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4594 	struct spdk_nvme_ns ns1 = {}, ns2 = {};
4595 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4596 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4597 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4598 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4599 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, };
4600 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4601 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4602 
4603 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4604 
4605 	/* Test that an io_path whose ANA state is not accessible is excluded. */
4606 
4607 	nvme_qpair1.qpair = &qpair1;
4608 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4609 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4610 
4611 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4612 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4613 
4614 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4615 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4616 
4617 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4618 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4619 
4620 	nbdev_ch.current_io_path = NULL;
4621 
4622 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4623 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4624 
4625 	nbdev_ch.current_io_path = NULL;
4626 
4627 	/* Test that an io_path whose qpair is resetting is excluded. */
4628 
4629 	nvme_qpair1.qpair = NULL;
4630 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4631 
4632 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4633 
4634 	/* Test that an io_path in ANA optimized state, or failing that, the first
4635 	 * found io_path in ANA non-optimized state, is prioritized.
4636 	 */
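	/* Roughly, bdev_nvme_find_io_path() is assumed to behave like:
	 *
	 *	for each io_path with a connected qpair:
	 *		if its ANA state is optimized, return it immediately;
	 *		otherwise remember the first non-optimized path as a fallback;
	 *	return the fallback, or NULL if no path was usable.
	 */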
4637 
4638 	nvme_qpair1.qpair = &qpair1;
4639 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4640 	nvme_qpair2.qpair = &qpair2;
4641 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4642 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4643 
4644 	nbdev_ch.current_io_path = NULL;
4645 
4646 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4647 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4648 
4649 	nbdev_ch.current_io_path = NULL;
4650 }
4651 
4652 static void
4653 test_retry_io_if_ana_state_is_updating(void)
4654 {
4655 	struct nvme_path_id path = {};
4656 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
4657 	struct spdk_nvme_ctrlr *ctrlr;
4658 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
4659 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4660 	struct nvme_ctrlr *nvme_ctrlr;
4661 	const int STRING_SIZE = 32;
4662 	const char *attached_names[STRING_SIZE];
4663 	struct nvme_bdev *nbdev;
4664 	struct nvme_ns *nvme_ns;
4665 	struct spdk_bdev_io *bdev_io1;
4666 	struct spdk_io_channel *ch;
4667 	struct nvme_bdev_channel *nbdev_ch;
4668 	struct nvme_io_path *io_path;
4669 	struct nvme_qpair *nvme_qpair;
4670 	int rc;
4671 
4672 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4673 	ut_init_trid(&path.trid);
4674 
4675 	set_thread(0);
4676 
4677 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4678 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4679 
4680 	g_ut_attach_ctrlr_status = 0;
4681 	g_ut_attach_bdev_count = 1;
4682 
4683 	opts.ctrlr_loss_timeout_sec = -1;
4684 	opts.reconnect_delay_sec = 1;
4685 	opts.multipath = false;
4686 
4687 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4688 				   attach_ctrlr_done, NULL, &dopts, &opts);
4689 	CU_ASSERT(rc == 0);
4690 
4691 	spdk_delay_us(1000);
4692 	poll_threads();
4693 
4694 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4695 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4696 
4697 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
4698 	CU_ASSERT(nvme_ctrlr != NULL);
4699 
4700 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4701 	CU_ASSERT(nbdev != NULL);
4702 
4703 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4704 	CU_ASSERT(nvme_ns != NULL);
4705 
4706 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, nbdev, NULL);
4707 	ut_bdev_io_set_buf(bdev_io1);
4708 
4709 	ch = spdk_get_io_channel(nbdev);
4710 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4711 
4712 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4713 
4714 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4715 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4716 
4717 	nvme_qpair = io_path->qpair;
4718 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4719 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4720 
4721 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4722 
4723 	/* If qpair is connected, I/O should succeed. */
4724 	bdev_io1->internal.f.in_submit_request = true;
4725 
4726 	bdev_nvme_submit_request(ch, bdev_io1);
4727 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4728 
4729 	poll_threads();
4730 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
4731 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4732 
4733 	/* If the ANA state of the namespace is inaccessible, the I/O should be queued. */
4734 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4735 	nbdev_ch->current_io_path = NULL;
4736 
4737 	bdev_io1->internal.f.in_submit_request = true;
4738 
4739 	bdev_nvme_submit_request(ch, bdev_io1);
4740 
4741 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4742 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4743 	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4744 
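	/* Queued I/Os are retried by a per-channel poller; presumably the retry
	 * fires only after the retry delay expires, hence the one second delay below.
	 */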
4745 	/* ANA state became accessible while I/O was queued. */
4746 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4747 
4748 	spdk_delay_us(1000000);
4749 
4750 	poll_thread_times(0, 1);
4751 
4752 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4753 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4754 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4755 
4756 	poll_threads();
4757 
4758 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4759 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
4760 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4761 
4762 	free(bdev_io1);
4763 
4764 	spdk_put_io_channel(ch);
4765 
4766 	poll_threads();
4767 
4768 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4769 	CU_ASSERT(rc == 0);
4770 
4771 	poll_threads();
4772 	spdk_delay_us(1000);
4773 	poll_threads();
4774 
4775 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4776 }
4777 
4778 static void
4779 test_retry_io_for_io_path_error(void)
4780 {
4781 	struct nvme_path_id path1 = {}, path2 = {};
4782 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4783 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4784 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4785 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4786 	const int STRING_SIZE = 32;
4787 	const char *attached_names[STRING_SIZE];
4788 	struct nvme_bdev *nbdev;
4789 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4790 	struct spdk_bdev_io *bdev_io;
4791 	struct nvme_bdev_io *bio;
4792 	struct spdk_io_channel *ch;
4793 	struct nvme_bdev_channel *nbdev_ch;
4794 	struct nvme_io_path *io_path1, *io_path2;
4795 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4796 	struct ut_nvme_req *req;
4797 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4798 	int rc;
4799 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4800 
4801 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4802 	bdev_opts.multipath = true;
4803 
4804 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4805 	ut_init_trid(&path1.trid);
4806 	ut_init_trid2(&path2.trid);
4807 
4808 	g_opts.bdev_retry_count = 1;
4809 
4810 	set_thread(0);
4811 
4812 	g_ut_attach_ctrlr_status = 0;
4813 	g_ut_attach_bdev_count = 1;
4814 
4815 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4816 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4817 
4818 	ctrlr1->ns[0].uuid = &uuid1;
4819 
4820 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4821 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4822 	CU_ASSERT(rc == 0);
4823 
4824 	spdk_delay_us(1000);
4825 	poll_threads();
4826 
4827 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4828 	poll_threads();
4829 
4830 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4831 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4832 
4833 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4834 	CU_ASSERT(nvme_ctrlr1 != NULL);
4835 
4836 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4837 	CU_ASSERT(nbdev != NULL);
4838 
4839 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4840 	CU_ASSERT(nvme_ns1 != NULL);
4841 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(nbdev, nvme_ctrlr1));
4842 
4843 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, nbdev, NULL);
4844 	ut_bdev_io_set_buf(bdev_io);
4845 
4846 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4847 
4848 	ch = spdk_get_io_channel(nbdev);
4849 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4850 
4851 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4852 
4853 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4854 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4855 
4856 	nvme_qpair1 = io_path1->qpair;
4857 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4858 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4859 
4860 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4861 
4862 	/* The I/O got a temporary I/O path error, but it should not be retried if DNR is set. */
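	/* The assumed retry decision, as a sketch (not the exact code):
	 *
	 *	retry = status is a path or transient error
	 *		&& cpl.status.dnr == 0
	 *		&& (g_opts.bdev_retry_count < 0
	 *		    || bio->retry_count < g_opts.bdev_retry_count);
	 */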
4863 	bdev_io->internal.f.in_submit_request = true;
4864 
4865 	bdev_nvme_submit_request(ch, bdev_io);
4866 
4867 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4868 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4869 
4870 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4871 	SPDK_CU_ASSERT_FATAL(req != NULL);
4872 
4873 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4874 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4875 	req->cpl.status.dnr = 1;
4876 
4877 	poll_thread_times(0, 1);
4878 
4879 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4880 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4881 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4882 
4883 	/* The I/O got a temporary I/O path error, but it should succeed after a retry. */
4884 	bdev_io->internal.f.in_submit_request = true;
4885 
4886 	bdev_nvme_submit_request(ch, bdev_io);
4887 
4888 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4889 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4890 
4891 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4892 	SPDK_CU_ASSERT_FATAL(req != NULL);
4893 
4894 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4895 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4896 
4897 	poll_thread_times(0, 1);
4898 
4899 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4900 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4901 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4902 
4903 	poll_threads();
4904 
4905 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4906 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4907 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4908 
4909 	/* Add io_path2 dynamically, and create a multipath configuration. */
4910 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4911 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4912 
4913 	ctrlr2->ns[0].uuid = &uuid1;
4914 
4915 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4916 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4917 	CU_ASSERT(rc == 0);
4918 
4919 	spdk_delay_us(1000);
4920 	poll_threads();
4921 
4922 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4923 	poll_threads();
4924 
4925 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4926 	CU_ASSERT(nvme_ctrlr2 != NULL);
4927 
4928 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4929 	CU_ASSERT(nvme_ns2 != NULL);
4930 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(nbdev, nvme_ctrlr2));
4931 
4932 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4933 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4934 
4935 	nvme_qpair2 = io_path2->qpair;
4936 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4937 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4938 
4939 	/* The I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4940 	 * and deleted. Hence the I/O is aborted. However, io_path2 is available,
4941 	 * so after a retry the I/O is submitted to io_path2 and should succeed.
4942 	 */
4943 	bdev_io->internal.f.in_submit_request = true;
4944 
4945 	bdev_nvme_submit_request(ch, bdev_io);
4946 
4947 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4948 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4949 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4950 
4951 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4952 	SPDK_CU_ASSERT_FATAL(req != NULL);
4953 
4954 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4955 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4956 
4957 	poll_thread_times(0, 1);
4958 
4959 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4960 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4961 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4962 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4963 
4964 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4965 	nvme_qpair1->qpair = NULL;
4966 
4967 	poll_threads();
4968 
4969 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4970 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4971 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4972 
4973 	free(bdev_io);
4974 
4975 	spdk_put_io_channel(ch);
4976 
4977 	poll_threads();
4978 
4979 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4980 	CU_ASSERT(rc == 0);
4981 
4982 	poll_threads();
4983 	spdk_delay_us(1000);
4984 	poll_threads();
4985 
4986 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4987 
4988 	g_opts.bdev_retry_count = 0;
4989 }
4990 
4991 static void
4992 test_retry_io_count(void)
4993 {
4994 	struct nvme_path_id path = {};
4995 	struct spdk_nvme_ctrlr *ctrlr;
4996 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4997 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4998 	struct nvme_ctrlr *nvme_ctrlr;
4999 	const int STRING_SIZE = 32;
5000 	const char *attached_names[STRING_SIZE];
5001 	struct nvme_bdev *nbdev;
5002 	struct nvme_ns *nvme_ns;
5003 	struct spdk_bdev_io *bdev_io;
5004 	struct nvme_bdev_io *bio;
5005 	struct spdk_io_channel *ch;
5006 	struct nvme_bdev_channel *nbdev_ch;
5007 	struct nvme_io_path *io_path;
5008 	struct nvme_qpair *nvme_qpair;
5009 	struct ut_nvme_req *req;
5010 	int rc;
5011 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5012 
5013 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5014 	bdev_opts.multipath = false;
5015 
5016 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5017 	ut_init_trid(&path.trid);
5018 
5019 	set_thread(0);
5020 
5021 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5022 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5023 
5024 	g_ut_attach_ctrlr_status = 0;
5025 	g_ut_attach_bdev_count = 1;
5026 
5027 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5028 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5029 	CU_ASSERT(rc == 0);
5030 
5031 	spdk_delay_us(1000);
5032 	poll_threads();
5033 
5034 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5035 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5036 
5037 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
5038 	CU_ASSERT(nvme_ctrlr != NULL);
5039 
5040 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5041 	CU_ASSERT(nbdev != NULL);
5042 
5043 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5044 	CU_ASSERT(nvme_ns != NULL);
5045 
5046 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, nbdev, NULL);
5047 	ut_bdev_io_set_buf(bdev_io);
5048 
5049 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5050 
5051 	ch = spdk_get_io_channel(nbdev);
5052 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5053 
5054 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5055 
5056 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5057 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5058 
5059 	nvme_qpair = io_path->qpair;
5060 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5061 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5062 
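	/* Point the bdev_io at the I/O channel so that code which looks up the
	 * channel from the bdev_io (e.g. when queuing a retry) finds this channel.
	 */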
5063 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5064 
5065 	/* If an I/O is aborted by request, it should not be retried. */
5066 	g_opts.bdev_retry_count = 1;
5067 
5068 	bdev_io->internal.f.in_submit_request = true;
5069 
5070 	bdev_nvme_submit_request(ch, bdev_io);
5071 
5072 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5073 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5074 
5075 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5076 	SPDK_CU_ASSERT_FATAL(req != NULL);
5077 
5078 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
5079 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5080 
5081 	poll_thread_times(0, 1);
5082 
5083 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5084 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5085 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
5086 
5087 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
5088 	 * the failed I/O should not be retried.
5089 	 */
5090 	g_opts.bdev_retry_count = 4;
5091 
5092 	bdev_io->internal.f.in_submit_request = true;
5093 
5094 	bdev_nvme_submit_request(ch, bdev_io);
5095 
5096 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5097 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5098 
5099 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5100 	SPDK_CU_ASSERT_FATAL(req != NULL);
5101 
5102 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5103 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5104 	bio->retry_count = 4;
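	/* retry_count (4) is not less than bdev_retry_count (4), so this error
	 * completion should fail the I/O instead of retrying it.
	 */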
5105 
5106 	poll_thread_times(0, 1);
5107 
5108 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5109 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5110 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
5111 
5112 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
5113 	g_opts.bdev_retry_count = -1;
5114 
5115 	bdev_io->internal.f.in_submit_request = true;
5116 
5117 	bdev_nvme_submit_request(ch, bdev_io);
5118 
5119 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5120 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5121 
5122 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5123 	SPDK_CU_ASSERT_FATAL(req != NULL);
5124 
5125 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5126 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5127 	bio->retry_count = 4;
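	/* With bdev_retry_count == -1, the retry_count check is skipped and the
	 * failed I/O is requeued regardless of how many retries it has seen.
	 */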
5128 
5129 	poll_thread_times(0, 1);
5130 
5131 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5132 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5133 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5134 
5135 	poll_threads();
5136 
5137 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5138 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5139 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5140 
5141 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
5142 	 * the failed I/O should be retried.
5143 	 */
5144 	g_opts.bdev_retry_count = 4;
5145 
5146 	bdev_io->internal.f.in_submit_request = true;
5147 
5148 	bdev_nvme_submit_request(ch, bdev_io);
5149 
5150 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5151 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5152 
5153 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5154 	SPDK_CU_ASSERT_FATAL(req != NULL);
5155 
5156 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5157 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5158 	bio->retry_count = 3;
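	/* retry_count (3) is less than bdev_retry_count (4), so this error
	 * completion should requeue the I/O.
	 */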
5159 
5160 	poll_thread_times(0, 1);
5161 
5162 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5163 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5164 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5165 
5166 	poll_threads();
5167 
5168 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5169 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5170 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5171 
5172 	free(bdev_io);
5173 
5174 	spdk_put_io_channel(ch);
5175 
5176 	poll_threads();
5177 
5178 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5179 	CU_ASSERT(rc == 0);
5180 
5181 	poll_threads();
5182 	spdk_delay_us(1000);
5183 	poll_threads();
5184 
5185 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5186 
5187 	g_opts.bdev_retry_count = 0;
5188 }
5189 
5190 static void
5191 test_concurrent_read_ana_log_page(void)
5192 {
5193 	struct spdk_nvme_transport_id trid = {};
5194 	struct spdk_nvme_ctrlr *ctrlr;
5195 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5196 	struct nvme_ctrlr *nvme_ctrlr;
5197 	const int STRING_SIZE = 32;
5198 	const char *attached_names[STRING_SIZE];
5199 	int rc;
5200 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5201 
5202 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5203 	bdev_opts.multipath = false;
5204 
5205 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5206 	ut_init_trid(&trid);
5207 
5208 	set_thread(0);
5209 
5210 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
5211 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5212 
5213 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5214 
5215 	g_ut_attach_ctrlr_status = 0;
5216 	g_ut_attach_bdev_count = 1;
5217 
5218 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
5219 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5220 	CU_ASSERT(rc == 0);
5221 
5222 	spdk_delay_us(1000);
5223 	poll_threads();
5224 
5225 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5226 	poll_threads();
5227 
5228 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5229 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5230 
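	/* The first read sets ana_log_page_updating and issues one admin command
	 * (the ANA log page read). Concurrent reads below should be rejected
	 * while it is in flight.
	 */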
5231 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5232 
5233 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5234 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5235 
5236 	/* A second read request issued while the first is in flight should be rejected. */
5237 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5238 
5239 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5240 
5241 	set_thread(1);
5242 
5243 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5244 
5245 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5246 
5247 	/* A reset request issued while reading the ANA log page should not be rejected. */
5248 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5249 	CU_ASSERT(rc == 0);
5250 
5251 	poll_threads();
5252 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5253 	poll_threads();
5254 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5255 	poll_threads();
5256 
5257 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5258 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5259 
5260 	/* Reading the ANA log page while the ctrlr is resetting should be rejected. */
5261 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5262 	CU_ASSERT(rc == 0);
5263 
5264 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5265 
5266 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5267 
5268 	poll_threads();
5269 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5270 	poll_threads();
5271 
5272 	set_thread(0);
5273 
5274 	/* It is possible that target sent ANA change for inactive namespaces.
5275 	/* It is possible that the target sends an ANA change for inactive namespaces.
5276 	 *
5277 	 * Previously, an assert() was added because this case was thought unlikely.
5278 	 * However, the assert() was hit in a real environment.
5279 	 *
5280 	 * Hence, remove the assert() and add a unit test case.
5281 	 *
5282 	 * Simulate this case by depopulating namespaces and then parsing an ANA
5283 	 * log page created when all namespaces were active.
5284 	 * Then, check that parsing the ANA log page completes successfully.
5285 	 */
5286 
5287 	rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr);
5288 	CU_ASSERT(rc == 0);
5289 
5290 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5291 	CU_ASSERT(rc == 0);
5292 
5293 	poll_threads();
5294 	spdk_delay_us(1000);
5295 	poll_threads();
5296 
5297 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5298 }
5299 
5300 static void
5301 test_retry_io_for_ana_error(void)
5302 {
5303 	struct nvme_path_id path = {};
5304 	struct spdk_nvme_ctrlr *ctrlr;
5305 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5306 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5307 	struct nvme_ctrlr *nvme_ctrlr;
5308 	const int STRING_SIZE = 32;
5309 	const char *attached_names[STRING_SIZE];
5310 	struct nvme_bdev *nbdev;
5311 	struct nvme_ns *nvme_ns;
5312 	struct spdk_bdev_io *bdev_io;
5313 	struct nvme_bdev_io *bio;
5314 	struct spdk_io_channel *ch;
5315 	struct nvme_bdev_channel *nbdev_ch;
5316 	struct nvme_io_path *io_path;
5317 	struct nvme_qpair *nvme_qpair;
5318 	struct ut_nvme_req *req;
5319 	uint64_t now;
5320 	int rc;
5321 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5322 
5323 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5324 	bdev_opts.multipath = false;
5325 
5326 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5327 	ut_init_trid(&path.trid);
5328 
5329 	g_opts.bdev_retry_count = 1;
5330 
5331 	set_thread(0);
5332 
5333 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
5334 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5335 
5336 	g_ut_attach_ctrlr_status = 0;
5337 	g_ut_attach_bdev_count = 1;
5338 
5339 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5340 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5341 	CU_ASSERT(rc == 0);
5342 
5343 	spdk_delay_us(1000);
5344 	poll_threads();
5345 
5346 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5347 	poll_threads();
5348 
5349 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5350 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5351 
5352 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
5353 	CU_ASSERT(nvme_ctrlr != NULL);
5354 
5355 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5356 	CU_ASSERT(nbdev != NULL);
5357 
5358 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5359 	CU_ASSERT(nvme_ns != NULL);
5360 
5361 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, nbdev, NULL);
5362 	ut_bdev_io_set_buf(bdev_io);
5363 
5364 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5365 
5366 	ch = spdk_get_io_channel(nbdev);
5367 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5368 
5369 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5370 
5371 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5372 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5373 
5374 	nvme_qpair = io_path->qpair;
5375 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5376 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5377 
5378 	now = spdk_get_ticks();
5379 
5380 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5381 
5382 	/* If an I/O gets an ANA error, it should be queued, the corresponding namespace
5383 	 * should be frozen, and its ANA state should be updated.
5384 	 */
5385 	bdev_io->internal.f.in_submit_request = true;
5386 
5387 	bdev_nvme_submit_request(ch, bdev_io);
5388 
5389 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5390 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5391 
5392 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5393 	SPDK_CU_ASSERT_FATAL(req != NULL);
5394 
5395 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5396 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
5397 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
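	/* An ANA INACCESSIBLE completion is a path error. The I/O should be queued
	 * for retry and an ANA log page update should be triggered.
	 */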
5398 
5399 	poll_thread_times(0, 1);
5400 
5401 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5402 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5403 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5404 	/* I/O should be retried immediately. */
5405 	CU_ASSERT(bio->retry_ticks == now);
5406 	CU_ASSERT(nvme_ns->ana_state_updating == true);
5407 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5408 
5409 	poll_threads();
5410 
5411 	/* Namespace is inaccessible, and hence I/O should be queued again. */
5412 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5413 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5414 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5415 	/* The I/O should be retried after a second if no I/O path was found but
5416 	 * one may become available.
5417 	 */
5418 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
5419 
5420 	/* The namespace should be unfrozen after its ANA state update completes. */
5421 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5422 	poll_threads();
5423 
5424 	CU_ASSERT(nvme_ns->ana_state_updating == false);
5425 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5426 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5427 
5428 	/* Retrying the queued I/O should succeed. */
5429 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
5430 	poll_threads();
5431 
5432 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5433 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5434 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5435 
5436 	free(bdev_io);
5437 
5438 	spdk_put_io_channel(ch);
5439 
5440 	poll_threads();
5441 
5442 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5443 	CU_ASSERT(rc == 0);
5444 
5445 	poll_threads();
5446 	spdk_delay_us(1000);
5447 	poll_threads();
5448 
5449 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5450 
5451 	g_opts.bdev_retry_count = 0;
5452 }
5453 
5454 static void
5455 test_check_io_error_resiliency_params(void)
5456 {
5457 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5458 	 * 3rd parameter is fast_io_fail_timeout_sec.
5459 	 */
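	/* In short, as encoded by the checks below: ctrlr_loss_timeout_sec must be
	 * -1, 0, or positive; reconnect_delay_sec must be non-zero if and only if
	 * ctrlr_loss_timeout_sec is non-zero, and must not exceed a positive
	 * ctrlr_loss_timeout_sec; a non-zero fast_io_fail_timeout_sec requires a
	 * non-zero ctrlr_loss_timeout_sec and must be at least reconnect_delay_sec
	 * and at most a positive ctrlr_loss_timeout_sec.
	 */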
5460 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
5461 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
5462 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
5463 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
5464 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
5465 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
5466 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
5467 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
5468 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
5469 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
5470 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
5471 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
5472 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
5473 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
5474 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
5475 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
5476 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
5477 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5478 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
5479 }
5480 
5481 static void
5482 test_retry_io_if_ctrlr_is_resetting(void)
5483 {
5484 	struct nvme_path_id path = {};
5485 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
5486 	struct spdk_nvme_ctrlr *ctrlr;
5487 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5488 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5489 	struct nvme_ctrlr *nvme_ctrlr;
5490 	const int STRING_SIZE = 32;
5491 	const char *attached_names[STRING_SIZE];
5492 	struct nvme_bdev *nbdev;
5493 	struct nvme_ns *nvme_ns;
5494 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5495 	struct spdk_io_channel *ch;
5496 	struct nvme_bdev_channel *nbdev_ch;
5497 	struct nvme_io_path *io_path;
5498 	struct nvme_qpair *nvme_qpair;
5499 	int rc;
5500 
5501 	g_opts.bdev_retry_count = 1;
5502 
5503 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5504 	ut_init_trid(&path.trid);
5505 
5506 	set_thread(0);
5507 
5508 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5509 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5510 
5511 	g_ut_attach_ctrlr_status = 0;
5512 	g_ut_attach_bdev_count = 1;
5513 
5514 	opts.ctrlr_loss_timeout_sec = -1;
5515 	opts.reconnect_delay_sec = 1;
5516 	opts.multipath = false;
5517 
5518 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5519 				   attach_ctrlr_done, NULL, &dopts, &opts);
5520 	CU_ASSERT(rc == 0);
5521 
5522 	spdk_delay_us(1000);
5523 	poll_threads();
5524 
5525 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5526 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5527 
5528 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
5529 	CU_ASSERT(nvme_ctrlr != NULL);
5530 
5531 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5532 	CU_ASSERT(nbdev != NULL);
5533 
5534 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5535 	CU_ASSERT(nvme_ns != NULL);
5536 
5537 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, nbdev, NULL);
5538 	ut_bdev_io_set_buf(bdev_io1);
5539 
5540 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, nbdev, NULL);
5541 	ut_bdev_io_set_buf(bdev_io2);
5542 
5543 	ch = spdk_get_io_channel(nbdev);
5544 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5545 
5546 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5547 
5548 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5549 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5550 
5551 	nvme_qpair = io_path->qpair;
5552 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5553 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5554 
5555 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5556 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5557 
5558 	/* If qpair is connected, I/O should succeed. */
5559 	bdev_io1->internal.f.in_submit_request = true;
5560 
5561 	bdev_nvme_submit_request(ch, bdev_io1);
5562 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5563 
5564 	poll_threads();
5565 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
5566 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5567 
5568 	/* If the qpair is disconnected, it is freed and then reconnected by resetting
5569 	 * the corresponding nvme_ctrlr. An I/O should be queued if it is submitted
5570 	 * while the nvme_ctrlr is resetting.
5571 	 */
5572 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5573 	ctrlr->is_failed = true;
5574 
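	/* Partially advance the reset: enough polls to disconnect and delete the
	 * qpair, but not enough to complete reconnection.
	 */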
5575 	poll_thread_times(0, 5);
5576 
5577 	CU_ASSERT(nvme_qpair->qpair == NULL);
5578 	CU_ASSERT(nvme_ctrlr->resetting == true);
5579 	CU_ASSERT(ctrlr->is_failed == false);
5580 
5581 	bdev_io1->internal.f.in_submit_request = true;
5582 
5583 	bdev_nvme_submit_request(ch, bdev_io1);
5584 
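	/* Delay 1 us between the two submissions so the queued I/Os get distinct
	 * retry deadlines and keep their order on the retry list.
	 */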
5585 	spdk_delay_us(1);
5586 
5587 	bdev_io2->internal.f.in_submit_request = true;
5588 
5589 	bdev_nvme_submit_request(ch, bdev_io2);
5590 
5591 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5592 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5593 	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5594 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(
5595 			  TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx,
5596 				     retry_link)));
5597 
5598 	poll_threads();
5599 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5600 	poll_threads();
5601 
5602 	CU_ASSERT(nvme_qpair->qpair != NULL);
5603 	CU_ASSERT(nvme_ctrlr->resetting == false);
5604 
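	/* Advance the clock so that only bdev_io1 reaches its retry deadline;
	 * bdev_io2, queued 1 us later, should stay on the retry list.
	 */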
5605 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5606 
5607 	poll_thread_times(0, 1);
5608 
5609 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5610 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5611 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5612 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5613 
5614 	poll_threads();
5615 
5616 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5617 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
5618 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5619 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5620 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5621 
5622 	spdk_delay_us(1);
5623 
5624 	poll_thread_times(0, 1);
5625 
5626 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5627 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5628 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5629 
5630 	poll_threads();
5631 
5632 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5633 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == false);
5634 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5635 
5636 	free(bdev_io1);
5637 	free(bdev_io2);
5638 
5639 	spdk_put_io_channel(ch);
5640 
5641 	poll_threads();
5642 
5643 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5644 	CU_ASSERT(rc == 0);
5645 
5646 	poll_threads();
5647 	spdk_delay_us(1000);
5648 	poll_threads();
5649 
5650 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5651 
5652 	g_opts.bdev_retry_count = 0;
5653 }
5654 
5655 static void
5656 test_reconnect_ctrlr(void)
5657 {
5658 	struct spdk_nvme_transport_id trid = {};
5659 	struct spdk_nvme_ctrlr ctrlr = {};
5660 	struct nvme_ctrlr *nvme_ctrlr;
5661 	struct spdk_io_channel *ch1, *ch2;
5662 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5663 	int rc;
5664 
5665 	ut_init_trid(&trid);
5666 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5667 
5668 	set_thread(0);
5669 
5670 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5671 	CU_ASSERT(rc == 0);
5672 
5673 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5674 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5675 
5676 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5677 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
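	/* With ctrlr_loss_timeout_sec == 2 and reconnect_delay_sec == 1, a reconnect
	 * is retried every second until the loss timeout expires and the ctrlr is
	 * deleted.
	 */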
5678 
5679 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5680 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5681 
5682 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5683 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5684 
5685 	set_thread(1);
5686 
5687 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5688 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5689 
5690 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5691 
5692 	/* Reset starts from thread 1. */
5693 	set_thread(1);
5694 
5695 	/* The reset should fail and a reconnect timer should be registered. */
5696 	ctrlr.fail_reset = true;
5697 	ctrlr.is_failed = true;
5698 
5699 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5700 	CU_ASSERT(rc == 0);
5701 	CU_ASSERT(nvme_ctrlr->resetting == true);
5702 	CU_ASSERT(ctrlr.is_failed == true);
5703 
5704 	poll_threads();
5705 
5706 	CU_ASSERT(nvme_ctrlr->resetting == false);
5707 	CU_ASSERT(ctrlr.is_failed == false);
5708 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5709 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5710 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5711 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5712 
5713 	/* A new reset starts from thread 1. */
5714 	set_thread(1);
5715 
5716 	/* The reset should cancel the reconnect timer and start reconnecting immediately.
5717 	 * Then, the reset should fail and a reconnect timer should be registered again.
5718 	 */
5719 	ctrlr.fail_reset = true;
5720 	ctrlr.is_failed = true;
5721 
5722 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5723 	CU_ASSERT(rc == 0);
5724 	CU_ASSERT(nvme_ctrlr->resetting == true);
5725 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5726 	CU_ASSERT(ctrlr.is_failed == true);
5727 
5728 	poll_threads();
5729 
5730 	CU_ASSERT(nvme_ctrlr->resetting == false);
5731 	CU_ASSERT(ctrlr.is_failed == false);
5732 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5733 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5734 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5735 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5736 
5737 	/* Then a reconnect retry should succeed. */
5738 	ctrlr.fail_reset = false;
5739 
5740 	spdk_delay_us(SPDK_SEC_TO_USEC);
5741 	poll_thread_times(0, 1);
5742 
5743 	CU_ASSERT(nvme_ctrlr->resetting == true);
5744 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5745 
5746 	poll_threads();
5747 
5748 	CU_ASSERT(nvme_ctrlr->resetting == false);
5749 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5750 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5751 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5752 
5753 	/* The reset should fail and a reconnect timer should be registered. */
5754 	ctrlr.fail_reset = true;
5755 	ctrlr.is_failed = true;
5756 
5757 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5758 	CU_ASSERT(rc == 0);
5759 	CU_ASSERT(nvme_ctrlr->resetting == true);
5760 	CU_ASSERT(ctrlr.is_failed == true);
5761 
5762 	poll_threads();
5763 
5764 	CU_ASSERT(nvme_ctrlr->resetting == false);
5765 	CU_ASSERT(ctrlr.is_failed == false);
5766 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5767 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5768 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5769 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5770 
5771 	/* Then a reconnect retry should still fail. */
5772 	spdk_delay_us(SPDK_SEC_TO_USEC);
5773 	poll_thread_times(0, 1);
5774 
5775 	CU_ASSERT(nvme_ctrlr->resetting == true);
5776 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5777 
5778 	poll_threads();
5779 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5780 	poll_threads();
5781 
5782 	CU_ASSERT(nvme_ctrlr->resetting == false);
5783 	CU_ASSERT(ctrlr.is_failed == false);
5784 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5785 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5786 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5787 
5788 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5789 	spdk_delay_us(SPDK_SEC_TO_USEC);
5790 	poll_threads();
5791 
5792 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5793 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5794 	CU_ASSERT(nvme_ctrlr->destruct == true);
5795 
5796 	spdk_put_io_channel(ch2);
5797 
5798 	set_thread(0);
5799 
5800 	spdk_put_io_channel(ch1);
5801 
5802 	poll_threads();
5803 	spdk_delay_us(1000);
5804 	poll_threads();
5805 
5806 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5807 }
5808 
5809 static struct nvme_path_id *
5810 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5811 		       const struct spdk_nvme_transport_id *trid)
5812 {
5813 	struct nvme_path_id *p;
5814 
5815 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5816 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5817 			break;
5818 		}
5819 	}
5820 
5821 	return p;
5822 }
5823 
5824 static void
5825 test_retry_failover_ctrlr(void)
5826 {
5827 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5828 	struct spdk_nvme_ctrlr ctrlr = {};
5829 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5830 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5831 	struct spdk_io_channel *ch;
5832 	struct nvme_ctrlr_channel *ctrlr_ch;
5833 	int rc;
5834 
5835 	ut_init_trid(&trid1);
5836 	ut_init_trid2(&trid2);
5837 	ut_init_trid3(&trid3);
5838 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5839 
5840 	set_thread(0);
5841 
5842 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5843 	CU_ASSERT(rc == 0);
5844 
5845 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5846 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5847 
5848 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5849 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
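	/* ctrlr_loss_timeout_sec == -1 means reconnect is retried indefinitely and
	 * the ctrlr is never deleted due to timeout.
	 */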
5850 
5851 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5852 	CU_ASSERT(rc == 0);
5853 
5854 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5855 	CU_ASSERT(rc == 0);
5856 
5857 	ch = spdk_get_io_channel(nvme_ctrlr);
5858 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5859 
5860 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5861 
5862 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5863 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5864 	CU_ASSERT(path_id1->last_failed_tsc == 0);
5865 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5866 
5867 	/* If a reset fails and a reconnect is scheduled, the path_id is switched from trid1 to trid2. */
5868 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5869 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5870 
5871 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5872 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5873 
5874 	/* It is expected that connecting to all of trid1, trid2, and trid3 fails,
5875 	 * and a reconnect timer is started. */
5876 	ctrlr.fail_reset = true;
5877 	ctrlr.is_failed = true;
5878 
5879 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5880 	CU_ASSERT(rc == 0);
5881 
5882 	poll_threads();
5883 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5884 	poll_threads();
5885 
5886 	CU_ASSERT(nvme_ctrlr->resetting == false);
5887 	CU_ASSERT(ctrlr.is_failed == false);
5888 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5889 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5890 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5891 	CU_ASSERT(path_id1->last_failed_tsc != 0);
5892 
5893 	CU_ASSERT(path_id2->last_failed_tsc != 0);
5894 	CU_ASSERT(path_id3->last_failed_tsc != 0);
5895 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5896 
5897 	/* If trid1 is removed while a reconnect is scheduled, the active path_id is
5898 	 * switched to trid2 but a reset is not started.
5899 	 */
5900 	rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true);
5901 	CU_ASSERT(rc == -EALREADY);
5902 
5903 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL);
5904 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5905 
5906 	CU_ASSERT(nvme_ctrlr->resetting == false);
5907 
5908 	/* If reconnect succeeds, trid2 should be the active path_id */
5909 	ctrlr.fail_reset = false;
5910 
5911 	spdk_delay_us(SPDK_SEC_TO_USEC);
5912 	poll_thread_times(0, 1);
5913 
5914 	CU_ASSERT(nvme_ctrlr->resetting == true);
5915 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5916 
5917 	poll_threads();
5918 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5919 	poll_threads();
5920 
5921 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL);
5922 	CU_ASSERT(path_id2->last_failed_tsc == 0);
5923 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5924 	CU_ASSERT(nvme_ctrlr->resetting == false);
5925 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5926 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5927 
5928 	spdk_put_io_channel(ch);
5929 
5930 	poll_threads();
5931 
5932 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5933 	CU_ASSERT(rc == 0);
5934 
5935 	poll_threads();
5936 	spdk_delay_us(1000);
5937 	poll_threads();
5938 
5939 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5940 }
5941 
5942 static void
5943 test_fail_path(void)
5944 {
5945 	struct nvme_path_id path = {};
5946 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
5947 	struct spdk_nvme_ctrlr *ctrlr;
5948 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5949 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5950 	struct nvme_ctrlr *nvme_ctrlr;
5951 	const int STRING_SIZE = 32;
5952 	const char *attached_names[STRING_SIZE];
5953 	struct nvme_bdev *nbdev;
5954 	struct nvme_ns *nvme_ns;
5955 	struct spdk_bdev_io *bdev_io;
5956 	struct spdk_io_channel *ch;
5957 	struct nvme_bdev_channel *nbdev_ch;
5958 	struct nvme_io_path *io_path;
5959 	struct nvme_ctrlr_channel *ctrlr_ch;
5960 	int rc;
5961 
5962 	/* The test scenario is the following.
5963 	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5964 	 * - Resetting the ctrlr fails and reconnecting the ctrlr is repeated.
5965 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5966 	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
5967 	 *   expires first. The queued I/O is failed.
5968 	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
5969 	 * - Then ctrlr_loss_timeout_sec expires and the ctrlr is deleted.
5970 	 */
5971 
5972 	g_opts.bdev_retry_count = 1;
5973 
5974 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5975 	ut_init_trid(&path.trid);
5976 
5977 	set_thread(0);
5978 
5979 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5980 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5981 
5982 	g_ut_attach_ctrlr_status = 0;
5983 	g_ut_attach_bdev_count = 1;
5984 
5985 	opts.ctrlr_loss_timeout_sec = 4;
5986 	opts.reconnect_delay_sec = 1;
5987 	opts.fast_io_fail_timeout_sec = 2;
5988 	opts.multipath = false;
5989 
5990 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5991 				   attach_ctrlr_done, NULL, &dopts, &opts);
5992 	CU_ASSERT(rc == 0);
5993 
5994 	spdk_delay_us(1000);
5995 	poll_threads();
5996 
5997 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5998 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5999 
6000 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
6001 	CU_ASSERT(nvme_ctrlr != NULL);
6002 
6003 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6004 	CU_ASSERT(nbdev != NULL);
6005 
6006 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
6007 	CU_ASSERT(nvme_ns != NULL);
6008 
6009 	ch = spdk_get_io_channel(nbdev);
6010 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6011 
6012 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6013 
6014 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
6015 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6016 
6017 	ctrlr_ch = io_path->qpair->ctrlr_ch;
6018 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
6019 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
6020 
6021 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, nbdev, ch);
6022 	ut_bdev_io_set_buf(bdev_io);
6023 
6025 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
6026 	ctrlr->fail_reset = true;
6027 	ctrlr->is_failed = true;
6028 
6029 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
6030 	CU_ASSERT(rc == 0);
6031 	CU_ASSERT(nvme_ctrlr->resetting == true);
6032 	CU_ASSERT(ctrlr->is_failed == true);
6033 
6034 	poll_threads();
6035 
6036 	CU_ASSERT(nvme_ctrlr->resetting == false);
6037 	CU_ASSERT(ctrlr->is_failed == false);
6038 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6039 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6040 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
6041 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
6042 
6043 	/* I/O should be queued. */
6044 	bdev_io->internal.f.in_submit_request = true;
6045 
6046 	bdev_nvme_submit_request(ch, bdev_io);
6047 
6048 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6049 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6050 
6051 	/* After a second, the I/O should still be queued and the ctrlr should
6052 	 * still be recovering.
6053 	 */
6054 	spdk_delay_us(SPDK_SEC_TO_USEC);
6055 	poll_threads();
6056 
6057 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6058 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6059 
6060 	CU_ASSERT(nvme_ctrlr->resetting == false);
6061 	CU_ASSERT(ctrlr->is_failed == false);
6062 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6063 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6064 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
6065 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
6068 
6069 	/* After two seconds, fast_io_fail_timeout_sec should expire. */
6070 	spdk_delay_us(SPDK_SEC_TO_USEC);
6071 	poll_threads();
6072 
6073 	CU_ASSERT(nvme_ctrlr->resetting == false);
6074 	CU_ASSERT(ctrlr->is_failed == false);
6075 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6076 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6077 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
6078 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
6079 
6080 	/* Then within a second, pending I/O should be failed. */
6081 	spdk_delay_us(SPDK_SEC_TO_USEC);
6082 	poll_threads();
6083 
6084 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6085 	poll_threads();
6086 
6087 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6088 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
6089 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
6090 
6091 	/* Another I/O submission should be failed immediately. */
6092 	bdev_io->internal.f.in_submit_request = true;
6093 
6094 	bdev_nvme_submit_request(ch, bdev_io);
6095 
6096 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6097 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
6098 
6099 	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr should
6100 	 * be deleted.
6101 	 */
6102 	spdk_delay_us(SPDK_SEC_TO_USEC);
6103 	poll_threads();
6104 
6105 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6106 	poll_threads();
6107 
6108 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
6109 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
6110 	CU_ASSERT(nvme_ctrlr->destruct == true);
6111 
6112 	spdk_put_io_channel(ch);
6113 
6114 	poll_threads();
6115 	spdk_delay_us(1000);
6116 	poll_threads();
6117 
6118 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6119 
6120 	free(bdev_io);
6121 
6122 	g_opts.bdev_retry_count = 0;
6123 }
6124 
6125 static void
6126 test_nvme_ns_cmp(void)
6127 {
6128 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
6129 
6130 	nvme_ns1.id = 0;
6131 	nvme_ns2.id = UINT32_MAX;
6132 
6133 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
6134 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
6135 }
6136 
6137 static void
6138 test_ana_transition(void)
6139 {
6140 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
6141 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
6142 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
6143 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
6144 
6145 	/* case 1: The ana_transition_timedout flag is cleared. */
6146 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6147 	nvme_ns.ana_transition_timedout = true;
6148 
6149 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6150 
6151 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6152 
6153 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
6154 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6155 
6156 	/* case 2: ANATT timer is kept. */
6157 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
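	/* Register an ANATT timer by hand to mimic a namespace whose earlier
	 * transition into ANA change state already started the timer.
	 */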
6158 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
6159 			      &nvme_ns,
6160 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6161 
6162 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6163 
6164 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6165 
6166 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6167 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
6168 
6169 	/* case 3: ANATT timer is stopped. */
6170 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6171 
6172 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6173 
6174 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6175 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6176 
6177 	/* case 4: ANATT timer is started. */
6178 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6179 
6180 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6181 
6182 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6183 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
6184 
6185 	/* case 5: ANATT timer expires. */
6186 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6187 
6188 	poll_threads();
6189 
6190 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6191 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
6192 }
6193 
6194 static void
6195 _set_preferred_path_cb(void *cb_arg, int rc)
6196 {
6197 	bool *done = cb_arg;
6198 
6199 	*done = true;
6200 }
6201 
6202 static void
6203 test_set_preferred_path(void)
6204 {
6205 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
6206 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
6207 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6208 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6209 	const int STRING_SIZE = 32;
6210 	const char *attached_names[STRING_SIZE];
6211 	struct nvme_bdev *nbdev;
6212 	struct spdk_io_channel *ch;
6213 	struct nvme_bdev_channel *nbdev_ch;
6214 	struct nvme_io_path *io_path;
6215 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6216 	const struct spdk_nvme_ctrlr_data *cdata;
6217 	bool done;
6218 	int rc;
6219 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
6220 
6221 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
6222 	bdev_opts.multipath = true;
6223 
6224 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6225 	ut_init_trid(&path1.trid);
6226 	ut_init_trid2(&path2.trid);
6227 	ut_init_trid3(&path3.trid);
6228 	g_ut_attach_ctrlr_status = 0;
6229 	g_ut_attach_bdev_count = 1;
6230 
6231 	set_thread(0);
6232 
6233 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6234 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6235 
6236 	ctrlr1->ns[0].uuid = &uuid1;
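	/* The same namespace UUID is set on ctrlr2 and ctrlr3 below so that all
	 * three ctrlrs attach as paths of a single nbdev.
	 */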
6237 
6238 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6239 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6240 	CU_ASSERT(rc == 0);
6241 
6242 	spdk_delay_us(1000);
6243 	poll_threads();
6244 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6245 	poll_threads();
6246 
6247 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6248 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6249 
6250 	ctrlr2->ns[0].uuid = &uuid1;
6251 
6252 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6253 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6254 	CU_ASSERT(rc == 0);
6255 
6256 	spdk_delay_us(1000);
6257 	poll_threads();
6258 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6259 	poll_threads();
6260 
6261 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
6262 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
6263 
6264 	ctrlr3->ns[0].uuid = &uuid1;
6265 
6266 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
6267 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6268 	CU_ASSERT(rc == 0);
6269 
6270 	spdk_delay_us(1000);
6271 	poll_threads();
6272 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6273 	poll_threads();
6274 
6275 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6276 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6277 
6278 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6279 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
6280 
6281 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6282 
6283 	ch = spdk_get_io_channel(nbdev);
6284 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6285 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6286 
6287 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6288 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6289 
6290 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6291 
6292 	/* If the io_path to ctrlr2 is set as the preferred path dynamically,
6293 	 * find_io_path() should return the io_path to ctrlr2.
6294 	 */
6295 
6296 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
6297 	done = false;
6298 
6299 	bdev_nvme_set_preferred_path(nbdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6300 
6301 	poll_threads();
6302 	CU_ASSERT(done == true);
6303 
6304 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6305 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6306 
6307 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6308 
6309 	/* If the io_path to ctrlr3 is set as the preferred path and then a new I/O
6310 	 * channel is acquired, find_io_path() should return the io_path to ctrlr3.
6311 	 */
6312 
6313 	spdk_put_io_channel(ch);
6314 
6315 	poll_threads();
6316 
6317 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
6318 	done = false;
6319 
6320 	bdev_nvme_set_preferred_path(nbdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6321 
6322 	poll_threads();
6323 	CU_ASSERT(done == true);
6324 
6325 	ch = spdk_get_io_channel(nbdev);
6326 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6327 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6328 
6329 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6330 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6331 
6332 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
6333 
6334 	spdk_put_io_channel(ch);
6335 
6336 	poll_threads();
6337 
6338 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6339 	CU_ASSERT(rc == 0);
6340 
6341 	poll_threads();
6342 	spdk_delay_us(1000);
6343 	poll_threads();
6344 
6345 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6346 }
6347 
6348 static void
6349 test_find_next_io_path(void)
6350 {
6351 	struct nvme_bdev_channel nbdev_ch = {
6352 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6353 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6354 		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
6355 	};
6356 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6357 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6358 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6359 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6360 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6361 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6362 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6363 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6364 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6365 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6366 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6367 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6368 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6369 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6370 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6371 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6372 
6373 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6374 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6375 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6376 
6377 	/* Test the case where nbdev_ch->current_io_path is set. The case of
6378 	 * current_io_path == NULL is covered in test_find_io_path.
6379 	 */
6380 
6381 	nbdev_ch.current_io_path = &io_path2;
6382 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6383 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6384 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6385 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6386 
6387 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6388 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6389 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6390 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6391 
6392 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6393 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6394 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6395 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6396 
6397 	nbdev_ch.current_io_path = &io_path3;
6398 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6399 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6400 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6401 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6402 
6403 	/* Test if the next io_path is selected according to rr_min_io. */
6404 
6405 	nbdev_ch.current_io_path = NULL;
6406 	nbdev_ch.rr_min_io = 2;
6407 	nbdev_ch.rr_counter = 0;
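	/* With rr_min_io == 2, each selected path should serve two I/Os before the
	 * round-robin selector advances to the next path.
	 */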
6408 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6409 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6410 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6411 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6412 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6413 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6414 
6415 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6416 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6417 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6418 }
6419 
6420 static void
6421 test_find_io_path_min_qd(void)
6422 {
6423 	struct nvme_bdev_channel nbdev_ch = {
6424 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6425 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6426 		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
6427 	};
6428 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6429 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6430 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6431 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6432 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6433 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6434 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6435 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6436 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6437 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6438 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6439 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6440 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6441 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6442 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6443 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6444 
6445 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6446 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6447 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6448 
6449 	/* Test that, with the least queue depth selector, the path with the fewest
6450 	 * outstanding requests is chosen among the paths in the best available ANA state.
6451 	 */
6452 	qpair1.num_outstanding_reqs = 2;
6453 	qpair2.num_outstanding_reqs = 1;
6454 	qpair3.num_outstanding_reqs = 0;
6455 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6456 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6457 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6458 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6459 
6460 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6461 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6462 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6463 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6464 
6465 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6466 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6467 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6468 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6469 
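	/* Make qpair2 deeper than qpair1. The selector should switch to io_path1,
	 * while the inaccessible io_path3 remains excluded.
	 */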
6470 	qpair2.num_outstanding_reqs = 4;
6471 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6472 }
6473 
6474 static void
6475 test_disable_auto_failback(void)
6476 {
6477 	struct nvme_path_id path1 = {}, path2 = {};
6478 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
6479 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6480 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6481 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6482 	struct nvme_ctrlr *nvme_ctrlr1;
6483 	const int STRING_SIZE = 32;
6484 	const char *attached_names[STRING_SIZE];
6485 	struct nvme_bdev *nbdev;
6486 	struct spdk_io_channel *ch;
6487 	struct nvme_bdev_channel *nbdev_ch;
6488 	struct nvme_io_path *io_path;
6489 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6490 	const struct spdk_nvme_ctrlr_data *cdata;
6491 	bool done;
6492 	int rc;
6493 
6494 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6495 	ut_init_trid(&path1.trid);
6496 	ut_init_trid2(&path2.trid);
6497 	g_ut_attach_ctrlr_status = 0;
6498 	g_ut_attach_bdev_count = 1;
6499 
6500 	g_opts.disable_auto_failback = true;
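	/* With automatic failback disabled, the first (preferred) path should not
	 * be re-selected automatically when it recovers.
	 */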
6501 
6502 	opts.ctrlr_loss_timeout_sec = -1;
6503 	opts.reconnect_delay_sec = 1;
6504 	opts.multipath = true;
6505 
6506 	set_thread(0);
6507 
6508 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6509 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6510 
6511 	ctrlr1->ns[0].uuid = &uuid1;
6512 
6513 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6514 				   attach_ctrlr_done, NULL, &dopts, &opts);
6515 	CU_ASSERT(rc == 0);
6516 
6517 	spdk_delay_us(1000);
6518 	poll_threads();
6519 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6520 	poll_threads();
6521 
6522 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6523 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6524 
6525 	ctrlr2->ns[0].uuid = &uuid1;
6526 
6527 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6528 				   attach_ctrlr_done, NULL, &dopts, &opts);
6529 	CU_ASSERT(rc == 0);
6530 
6531 	spdk_delay_us(1000);
6532 	poll_threads();
6533 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6534 	poll_threads();
6535 
6536 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6537 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6538 
6539 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6540 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
6541 
6542 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn);
6543 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6544 
6545 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6546 
6547 	ch = spdk_get_io_channel(nbdev);
6548 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6549 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6550 
6551 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6552 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6553 
6554 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6555 
6556 	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
6557 	ctrlr1->fail_reset = true;
6558 	ctrlr1->is_failed = true;
6559 
6560 	bdev_nvme_reset_ctrlr(nvme_ctrlr1);
6561 
6562 	poll_threads();
6563 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6564 	poll_threads();
6565 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6566 	poll_threads();
6567 
6568 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6569 
6570 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6571 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6572 
6573 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6574 
6575 	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
6576 	 * Hence, io_path to ctrlr2 should still be used.
6577 	 */
6578 	ctrlr1->fail_reset = false;
6579 
6580 	spdk_delay_us(SPDK_SEC_TO_USEC);
6581 	poll_threads();
6582 
6583 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6584 
6585 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6586 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6587 
6588 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6589 
6590 	/* Explicitly set the io_path to ctrlr1 as preferred. Then the io_path to
6591 	 * ctrlr1 should be used again.
6592 	 */
6593 
6594 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6595 	done = false;
6596 
6597 	bdev_nvme_set_preferred_path(nbdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6598 
6599 	poll_threads();
6600 	CU_ASSERT(done == true);
6601 
6602 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6603 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6604 
6605 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6606 
6607 	spdk_put_io_channel(ch);
6608 
6609 	poll_threads();
6610 
6611 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6612 	CU_ASSERT(rc == 0);
6613 
6614 	poll_threads();
6615 	spdk_delay_us(1000);
6616 	poll_threads();
6617 
6618 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6619 
6620 	g_opts.disable_auto_failback = false;
6621 }
6622 
6623 static void
6624 ut_set_multipath_policy_done(void *cb_arg, int rc)
6625 {
6626 	int *done = cb_arg;
6627 
6628 	SPDK_CU_ASSERT_FATAL(done != NULL);
6629 	*done = rc;
6630 }
6631 
6632 static void
6633 test_set_multipath_policy(void)
6634 {
6635 	struct nvme_path_id path1 = {}, path2 = {};
6636 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
6637 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6638 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6639 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6640 	const int STRING_SIZE = 32;
6641 	const char *attached_names[STRING_SIZE];
6642 	struct nvme_bdev *nbdev;
6643 	struct spdk_io_channel *ch;
6644 	struct nvme_bdev_channel *nbdev_ch;
6645 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6646 	int done;
6647 	int rc;
6648 
6649 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6650 	ut_init_trid(&path1.trid);
6651 	ut_init_trid2(&path2.trid);
6652 	g_ut_attach_ctrlr_status = 0;
6653 	g_ut_attach_bdev_count = 1;
6654 
6655 	g_opts.disable_auto_failback = true;
6656 
6657 	opts.ctrlr_loss_timeout_sec = -1;
6658 	opts.reconnect_delay_sec = 1;
6659 	opts.multipath = true;
6660 
6661 	set_thread(0);
6662 
6663 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6664 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6665 
6666 	ctrlr1->ns[0].uuid = &uuid1;
6667 
6668 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6669 				   attach_ctrlr_done, NULL, &dopts, &opts);
6670 	CU_ASSERT(rc == 0);
6671 
6672 	spdk_delay_us(1000);
6673 	poll_threads();
6674 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6675 	poll_threads();
6676 
6677 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6678 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6679 
6680 	ctrlr2->ns[0].uuid = &uuid1;
6681 
6682 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6683 				   attach_ctrlr_done, NULL, &dopts, &opts);
6684 	CU_ASSERT(rc == 0);
6685 
6686 	spdk_delay_us(1000);
6687 	poll_threads();
6688 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6689 	poll_threads();
6690 
6691 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6692 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6693 
6694 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6695 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
6696 
6697 	/* If the multipath policy is updated before any I/O channel is created,
6698 	 * a new I/O channel should have the update.
6699 	 */
6700 	done = -1;
6701 	spdk_bdev_nvme_set_multipath_policy(nbdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6702 					    BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX,
6703 					    ut_set_multipath_policy_done, &done);
6704 	poll_threads();
6705 	CU_ASSERT(done == 0);
6706 
6707 	CU_ASSERT(nbdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6708 	CU_ASSERT(nbdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6709 	CU_ASSERT(nbdev->rr_min_io == UINT32_MAX);
6710 
6711 	ch = spdk_get_io_channel(nbdev);
6712 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6713 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6714 
6715 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6716 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6717 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6718 
6719 	/* If the multipath policy is updated while an I/O channel is active,
6720 	 * the update should be applied to the I/O channel immediately.
6721 	 */
6722 	done = -1;
6723 	spdk_bdev_nvme_set_multipath_policy(nbdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6724 					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX,
6725 					    ut_set_multipath_policy_done, &done);
6726 	poll_threads();
6727 	CU_ASSERT(done == 0);
6728 
6729 	CU_ASSERT(nbdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6730 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6731 	CU_ASSERT(nbdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6732 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6733 	CU_ASSERT(nbdev->rr_min_io == UINT32_MAX);
6734 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6735 
6736 	spdk_put_io_channel(ch);
6737 
6738 	poll_threads();
6739 
6740 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6741 	CU_ASSERT(rc == 0);
6742 
6743 	poll_threads();
6744 	spdk_delay_us(1000);
6745 	poll_threads();
6746 
6747 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6748 }
6749 
6750 static void
6751 test_uuid_generation(void)
6752 {
6753 	uint32_t nsid1 = 1, nsid2 = 2;
6754 	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
6755 	char sn3[21] = "                    ";
6756 	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
6757 	struct spdk_uuid uuid1, uuid2;
6758 	int rc;
6759 
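	/* Note: nvme_generate_uuid() deterministically derives a UUID from the
	 * controller serial number and the namespace ID, so a difference in either
	 * input should yield a different UUID.
	 */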
6760 	/* Test case 1:
6761 	 * Serial numbers are the same, nsids are different.
6762 	 * Compare the two generated UUIDs - they should differ. */
6763 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6764 	CU_ASSERT(rc == 0);
6765 	rc = nvme_generate_uuid(sn1, nsid2, &uuid2);
6766 	CU_ASSERT(rc == 0);
6767 
6768 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6769 
6770 	/* Test case 2:
6771 	 * Serial numbers differ only by one character, nsids are the same.
6772 	 * Compare the two generated UUIDs - they should differ. */
6773 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6774 	CU_ASSERT(rc == 0);
6775 	rc = nvme_generate_uuid(sn2, nsid1, &uuid2);
6776 	CU_ASSERT(rc == 0);
6777 
6778 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6779 
6780 	/* Test case 3:
6781 	 * Serial number consists only of space characters.
6782 	 * Validate the generated UUID. */
6783 	rc = nvme_generate_uuid(sn3, nsid1, &uuid1);
6784 	CU_ASSERT(rc == 0);
6785 	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
6787 }
6788 
6789 static void
6790 test_retry_io_to_same_path(void)
6791 {
6792 	struct nvme_path_id path1 = {}, path2 = {};
6793 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6794 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6795 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6796 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
6797 	const int STRING_SIZE = 32;
6798 	const char *attached_names[STRING_SIZE];
6799 	struct nvme_bdev *nbdev;
6800 	struct spdk_bdev_io *bdev_io;
6801 	struct nvme_bdev_io *bio;
6802 	struct spdk_io_channel *ch;
6803 	struct nvme_bdev_channel *nbdev_ch;
6804 	struct nvme_io_path *io_path1, *io_path2;
6805 	struct ut_nvme_req *req;
6806 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6807 	int done;
6808 	int rc;
6809 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
6810 
6811 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
6812 	bdev_opts.multipath = true;
6813 
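	/* With a non-zero I/O qpair poll period, completions are reaped by a timed
	 * poller, so each spdk_delay_us(1) below advances the clock enough for the
	 * poller to fire.
	 */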
6814 	g_opts.nvme_ioq_poll_period_us = 1;
6815 
6816 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6817 	ut_init_trid(&path1.trid);
6818 	ut_init_trid2(&path2.trid);
6819 	g_ut_attach_ctrlr_status = 0;
6820 	g_ut_attach_bdev_count = 1;
6821 
6822 	set_thread(0);
6823 
6824 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6825 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6826 
6827 	ctrlr1->ns[0].uuid = &uuid1;
6828 
6829 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6830 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6831 	CU_ASSERT(rc == 0);
6832 
6833 	spdk_delay_us(1000);
6834 	poll_threads();
6835 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6836 	poll_threads();
6837 
6838 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6839 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6840 
6841 	ctrlr2->ns[0].uuid = &uuid1;
6842 
6843 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6844 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6845 	CU_ASSERT(rc == 0);
6846 
6847 	spdk_delay_us(1000);
6848 	poll_threads();
6849 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6850 	poll_threads();
6851 
6852 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6853 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6854 
6855 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
6856 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6857 
6858 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
6859 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6860 
6861 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6862 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
6863 
6864 	done = -1;
6865 	spdk_bdev_nvme_set_multipath_policy(nbdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6866 					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
6867 	poll_threads();
6868 	CU_ASSERT(done == 0);
6869 
6870 	CU_ASSERT(nbdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6871 	CU_ASSERT(nbdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6872 	CU_ASSERT(nbdev->rr_min_io == 1);
6873 
6874 	ch = spdk_get_io_channel(nbdev);
6875 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6876 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6877 
6878 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6879 	CU_ASSERT(nbdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6880 	CU_ASSERT(nbdev_ch->rr_min_io == 1);
6881 
6882 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, nbdev, ch);
6883 	ut_bdev_io_set_buf(bdev_io);
6884 
6885 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
6886 
6887 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
6888 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
6889 
6890 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
6891 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
6892 
6893 	/* The 1st I/O should be submitted to io_path1. */
6894 	bdev_io->internal.f.in_submit_request = true;
6895 
6896 	bdev_nvme_submit_request(ch, bdev_io);
6897 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6898 	CU_ASSERT(bio->io_path == io_path1);
6899 	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);
6900 
6901 	spdk_delay_us(1);
6902 
6903 	poll_threads();
6904 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6905 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6906 
6907 	/* The 2nd I/O should be submitted to io_path2 because the path selection
6908 	 * policy is round-robin.
6909 	 */
6910 	bdev_io->internal.f.in_submit_request = true;
6911 
6912 	bdev_nvme_submit_request(ch, bdev_io);
6913 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6914 	CU_ASSERT(bio->io_path == io_path2);
6915 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6916 
6917 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6918 	SPDK_CU_ASSERT_FATAL(req != NULL);
6919 
6920 	/* Set retry count to non-zero. */
6921 	g_opts.bdev_retry_count = 2;
6922 
6923 	/* Inject an I/O error. */
6924 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6925 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6926 
6927 	/* The 2nd I/O should be queued to nbdev_ch. */
6928 	spdk_delay_us(1);
6929 	poll_thread_times(0, 1);
6930 
6931 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6932 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6933 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6934 
6935 	/* The 2nd I/O should keep io_path2 cached. */
6936 	CU_ASSERT(bio->io_path == io_path2);
6937 
6938 	/* The 2nd I/O should be submitted to io_path2 again. */
6939 	poll_thread_times(0, 1);
6940 
6941 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6942 	CU_ASSERT(bio->io_path == io_path2);
6943 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6944 
6945 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6946 	SPDK_CU_ASSERT_FATAL(req != NULL);
6947 
6948 	/* Inject an I/O error again. */
6949 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6950 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6951 	req->cpl.status.crd = 1;
6952 
6953 	ctrlr2->cdata.crdt[1] = 1;
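	/* Note: cpl.status.crd == 1 selects Command Retry Delay Time 1 (cdata.crdt[1]),
	 * which is specified in units of 100 milliseconds. That is why the retry
	 * below requires the 100000 us delay.
	 */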
6954 
6955 	/* The 2nd I/O should be queued to nbdev_ch. */
6956 	spdk_delay_us(1);
6957 	poll_thread_times(0, 1);
6958 
6959 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6960 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6961 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6962 
6963 	/* The 2nd I/O should keep io_path2 cached. */
6964 	CU_ASSERT(bio->io_path == io_path2);
6965 
6966 	/* Detach ctrlr2 dynamically. */
6967 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
6968 	CU_ASSERT(rc == 0);
6969 
6970 	spdk_delay_us(1000);
6971 	poll_threads();
6972 	spdk_delay_us(1000);
6973 	poll_threads();
6974 	spdk_delay_us(1000);
6975 	poll_threads();
6976 	spdk_delay_us(1000);
6977 	poll_threads();
6978 
6979 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
6980 
6981 	poll_threads();
6982 	spdk_delay_us(100000);
6983 	poll_threads();
6984 	spdk_delay_us(1);
6985 	poll_threads();
6986 
6987 	/* The 2nd I/O should succeed via io_path1. */
6988 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6989 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6990 	CU_ASSERT(bio->io_path == io_path1);
6991 
6992 	free(bdev_io);
6993 
6994 	spdk_put_io_channel(ch);
6995 
6996 	poll_threads();
6997 	spdk_delay_us(1);
6998 	poll_threads();
6999 
7000 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7001 	CU_ASSERT(rc == 0);
7002 
7003 	poll_threads();
7004 	spdk_delay_us(1000);
7005 	poll_threads();
7006 
7007 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7008 
7009 	g_opts.nvme_ioq_poll_period_us = 0;
7010 	g_opts.bdev_retry_count = 0;
7011 }
7012 
7013 /* This case verifies the fix for a complex race condition in which a failover
7014  * request is lost if the fabric connect command times out while the controller
7015  * is being reset.
7016  */
7017 static void
7018 test_race_between_reset_and_disconnected(void)
7019 {
7020 	struct spdk_nvme_transport_id trid = {};
7021 	struct spdk_nvme_ctrlr ctrlr = {};
7022 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7023 	struct nvme_path_id *curr_trid;
7024 	struct spdk_io_channel *ch1, *ch2;
7025 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7026 	int rc;
7027 
7028 	ut_init_trid(&trid);
7029 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7030 
7031 	set_thread(0);
7032 
7033 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7034 	CU_ASSERT(rc == 0);
7035 
7036 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7037 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7038 
7039 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7040 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7041 
7042 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7043 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7044 
7045 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7046 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7047 
7048 	set_thread(1);
7049 
7050 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7051 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7052 
7053 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7054 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7055 
7056 	/* Reset starts from thread 1. */
7057 	set_thread(1);
7058 
7059 	nvme_ctrlr->resetting = false;
7060 	curr_trid->last_failed_tsc = spdk_get_ticks();
7061 	ctrlr.is_failed = true;
7062 
7063 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7064 	CU_ASSERT(rc == 0);
7065 	CU_ASSERT(nvme_ctrlr->resetting == true);
7066 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7067 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7068 
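	/* Step through the reset state machine message by message: the I/O qpairs
	 * are disconnected on each thread first, then the admin qpair is
	 * disconnected and reconnected, and finally the I/O qpairs are recreated.
	 */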
7069 	poll_thread_times(0, 3);
7070 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7071 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7072 
7073 	poll_thread_times(0, 1);
7074 	poll_thread_times(1, 1);
7075 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7076 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7077 	CU_ASSERT(ctrlr.is_failed == true);
7078 
7079 	poll_thread_times(1, 1);
7080 	poll_thread_times(0, 1);
7081 	CU_ASSERT(ctrlr.is_failed == false);
7082 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7083 
7084 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7085 	poll_thread_times(0, 2);
7086 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7087 
7088 	poll_thread_times(0, 1);
7089 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7090 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7091 
7092 	poll_thread_times(1, 1);
7093 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7094 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7095 	CU_ASSERT(nvme_ctrlr->resetting == true);
7096 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
7097 
7098 	/* At this point, just one poll remains before _bdev_nvme_reset_complete() runs.
7099 	 *
7100 	 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric
7101 	 * connect command is executed. If the fabric connect command times out,
7102 	 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until
7103 	 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false.
7104 	 *
7105 	 * Simulate a fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
7106 	 */
7107 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
7108 	CU_ASSERT(rc == -EINPROGRESS);
7109 	CU_ASSERT(nvme_ctrlr->resetting == true);
7110 	CU_ASSERT(nvme_ctrlr->pending_failover == true);
7111 
7112 	poll_thread_times(0, 1);
7113 	CU_ASSERT(nvme_ctrlr->resetting == true);
7114 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7115 
7116 	poll_threads();
7117 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7118 	poll_threads();
7119 
7120 	CU_ASSERT(nvme_ctrlr->resetting == false);
7121 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7122 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7123 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7124 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7125 
7126 	spdk_put_io_channel(ch2);
7127 
7128 	set_thread(0);
7129 
7130 	spdk_put_io_channel(ch1);
7131 
7132 	poll_threads();
7133 
7134 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7135 	CU_ASSERT(rc == 0);
7136 
7137 	poll_threads();
7138 	spdk_delay_us(1000);
7139 	poll_threads();
7140 
7141 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7142 }

7143 static void
7144 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
7145 {
7146 	int *_rc = (int *)cb_arg;
7147 
7148 	SPDK_CU_ASSERT_FATAL(_rc != NULL);
7149 	*_rc = rc;
7150 }
7151 
7152 static void
7153 test_ctrlr_op_rpc(void)
7154 {
7155 	struct spdk_nvme_transport_id trid = {};
7156 	struct spdk_nvme_ctrlr ctrlr = {};
7157 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7158 	struct nvme_path_id *curr_trid;
7159 	struct spdk_io_channel *ch1, *ch2;
7160 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7161 	int ctrlr_op_rc;
7162 	int rc;
7163 
7164 	ut_init_trid(&trid);
7165 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7166 
7167 	set_thread(0);
7168 
7169 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7170 	CU_ASSERT(rc == 0);
7171 
7172 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7173 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7174 
7175 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7176 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7177 
7178 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7179 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7180 
7181 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7182 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7183 
7184 	set_thread(1);
7185 
7186 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7187 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7188 
7189 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7190 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7191 
7192 	/* Reset starts from thread 1. */
7193 	set_thread(1);
7194 
7195 	/* Case 1: ctrlr is already being destructed. */
7196 	nvme_ctrlr->destruct = true;
7197 	ctrlr_op_rc = 0;
7198 
7199 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7200 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7201 
7202 	poll_threads();
7203 
7204 	CU_ASSERT(ctrlr_op_rc == -ENXIO);
7205 
7206 	/* Case 2: reset is in progress. */
7207 	nvme_ctrlr->destruct = false;
7208 	nvme_ctrlr->resetting = true;
7209 	ctrlr_op_rc = 0;
7210 
7211 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7212 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7213 
7214 	poll_threads();
7215 
7216 	CU_ASSERT(ctrlr_op_rc == -EBUSY);
7217 
7218 	/* Case 3: reset completes successfully. */
7219 	nvme_ctrlr->resetting = false;
7220 	curr_trid->last_failed_tsc = spdk_get_ticks();
7221 	ctrlr.is_failed = true;
7222 	ctrlr_op_rc = -1;
7223 
7224 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7225 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7226 
7227 	CU_ASSERT(nvme_ctrlr->resetting == true);
7228 	CU_ASSERT(ctrlr_op_rc == -1);
7229 
7230 	poll_threads();
7231 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7232 	poll_threads();
7233 
7234 	CU_ASSERT(nvme_ctrlr->resetting == false);
7235 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7236 	CU_ASSERT(ctrlr.is_failed == false);
7237 	CU_ASSERT(ctrlr_op_rc == 0);
7238 
7239 	/* Case 4: invalid operation. */
7240 	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
7241 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7242 
7243 	poll_threads();
7244 
7245 	CU_ASSERT(ctrlr_op_rc == -EINVAL);
7246 
7247 	spdk_put_io_channel(ch2);
7248 
7249 	set_thread(0);
7250 
7251 	spdk_put_io_channel(ch1);
7252 
7253 	poll_threads();
7254 
7255 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7256 	CU_ASSERT(rc == 0);
7257 
7258 	poll_threads();
7259 	spdk_delay_us(1000);
7260 	poll_threads();
7261 
7262 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7263 }
7264 
7265 static void
7266 test_bdev_ctrlr_op_rpc(void)
7267 {
7268 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
7269 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
7270 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7271 	struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL;
7272 	struct nvme_path_id *curr_trid1, *curr_trid2;
7273 	struct spdk_io_channel *ch11, *ch12, *ch21, *ch22;
7274 	struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22;
7275 	int ctrlr_op_rc;
7276 	int rc;
7277 
7278 	ut_init_trid(&trid1);
7279 	ut_init_trid2(&trid2);
7280 	TAILQ_INIT(&ctrlr1.active_io_qpairs);
7281 	TAILQ_INIT(&ctrlr2.active_io_qpairs);
7282 	ctrlr1.cdata.cmic.multi_ctrlr = 1;
7283 	ctrlr2.cdata.cmic.multi_ctrlr = 1;
7284 	ctrlr1.cdata.cntlid = 1;
7285 	ctrlr2.cdata.cntlid = 2;
7286 	ctrlr1.adminq.is_connected = true;
7287 	ctrlr2.adminq.is_connected = true;
7288 
7289 	set_thread(0);
7290 
7291 	rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL);
7292 	CU_ASSERT(rc == 0);
7293 
7294 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7295 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7296 
7297 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN);
7298 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
7299 
7300 	curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
7301 	SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL);
7302 
7303 	ch11 = spdk_get_io_channel(nvme_ctrlr1);
7304 	SPDK_CU_ASSERT_FATAL(ch11 != NULL);
7305 
7306 	ctrlr_ch11 = spdk_io_channel_get_ctx(ch11);
7307 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7308 
7309 	set_thread(1);
7310 
7311 	ch12 = spdk_get_io_channel(nvme_ctrlr1);
7312 	SPDK_CU_ASSERT_FATAL(ch12 != NULL);
7313 
7314 	ctrlr_ch12 = spdk_io_channel_get_ctx(ch12);
7315 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7316 
7317 	set_thread(0);
7318 
7319 	rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL);
7320 	CU_ASSERT(rc == 0);
7321 
7322 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN);
7323 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
7324 
7325 	curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
7326 	SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL);
7327 
7328 	ch21 = spdk_get_io_channel(nvme_ctrlr2);
7329 	SPDK_CU_ASSERT_FATAL(ch21 != NULL);
7330 
7331 	ctrlr_ch21 = spdk_io_channel_get_ctx(ch21);
7332 	CU_ASSERT(ctrlr_ch21->qpair != NULL);
7333 
7334 	set_thread(1);
7335 
7336 	ch22 = spdk_get_io_channel(nvme_ctrlr2);
7337 	SPDK_CU_ASSERT_FATAL(ch22 != NULL);
7338 
7339 	ctrlr_ch22 = spdk_io_channel_get_ctx(ch22);
7340 	CU_ASSERT(ctrlr_ch22->qpair != NULL);
7341 
7342 	/* Reset starts from thread 1. */
7343 	set_thread(1);
7344 
7345 	nvme_ctrlr1->resetting = false;
7346 	nvme_ctrlr2->resetting = false;
7347 	curr_trid1->last_failed_tsc = spdk_get_ticks();
7348 	curr_trid2->last_failed_tsc = spdk_get_ticks();
7349 	ctrlr_op_rc = -1;
7350 
7351 	nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET,
7352 			       ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7353 
7354 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7355 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7356 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7357 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7358 
7359 	poll_thread_times(0, 3);
7360 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7361 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7362 
7363 	poll_thread_times(0, 1);
7364 	poll_thread_times(1, 1);
7365 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7366 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7367 
7368 	poll_thread_times(1, 1);
7369 	poll_thread_times(0, 1);
7370 	CU_ASSERT(ctrlr1.adminq.is_connected == false);
7371 
7372 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7373 	poll_thread_times(0, 2);
7374 	CU_ASSERT(ctrlr1.adminq.is_connected == true);
7375 
7376 	poll_thread_times(0, 1);
7377 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7378 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7379 
7380 	poll_thread_times(1, 1);
7381 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7382 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7383 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7384 	CU_ASSERT(curr_trid1->last_failed_tsc != 0);
7385 
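	/* Finish the reset of nvme_ctrlr1. nvme_bdev_ctrlr_op_rpc() operates on the
	 * ctrlrs sequentially, so completing nvme_ctrlr1 starts the reset of
	 * nvme_ctrlr2.
	 */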
7386 	poll_thread_times(0, 2);
7387 	poll_thread_times(1, 1);
7388 	poll_thread_times(0, 1);
7389 	poll_thread_times(1, 1);
7390 	poll_thread_times(0, 1);
7391 	poll_thread_times(1, 1);
7392 	poll_thread_times(0, 1);
7393 
7394 	CU_ASSERT(nvme_ctrlr1->resetting == false);
7395 	CU_ASSERT(curr_trid1->last_failed_tsc == 0);
7396 	CU_ASSERT(nvme_ctrlr2->resetting == true);
7397 
7398 	poll_threads();
7399 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7400 	poll_threads();
7401 
7402 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7403 	CU_ASSERT(ctrlr_op_rc == 0);
7404 
7405 	set_thread(1);
7406 
7407 	spdk_put_io_channel(ch12);
7408 	spdk_put_io_channel(ch22);
7409 
7410 	set_thread(0);
7411 
7412 	spdk_put_io_channel(ch11);
7413 	spdk_put_io_channel(ch21);
7414 
7415 	poll_threads();
7416 
7417 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7418 	CU_ASSERT(rc == 0);
7419 
7420 	poll_threads();
7421 	spdk_delay_us(1000);
7422 	poll_threads();
7423 
7424 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7425 }
7426 
7427 static void
7428 test_disable_enable_ctrlr(void)
7429 {
7430 	struct spdk_nvme_transport_id trid = {};
7431 	struct spdk_nvme_ctrlr ctrlr = {};
7432 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7433 	struct nvme_path_id *curr_trid;
7434 	struct spdk_io_channel *ch1, *ch2;
7435 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7436 	int rc;
7437 
7438 	ut_init_trid(&trid);
7439 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7440 	ctrlr.adminq.is_connected = true;
7441 
7442 	set_thread(0);
7443 
7444 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7445 	CU_ASSERT(rc == 0);
7446 
7447 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7448 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7449 
7450 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7451 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7452 
7453 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7454 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7455 
7456 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7457 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7458 
7459 	set_thread(1);
7460 
7461 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7462 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7463 
7464 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7465 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7466 
7467 	/* Disable starts from thread 1. */
7468 	set_thread(1);
7469 
7470 	/* Case 1: ctrlr is already disabled. */
7471 	nvme_ctrlr->disabled = true;
7472 
7473 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7474 	CU_ASSERT(rc == -EALREADY);
7475 
7476 	/* Case 2: ctrlr is already being destructed. */
7477 	nvme_ctrlr->disabled = false;
7478 	nvme_ctrlr->destruct = true;
7479 
7480 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7481 	CU_ASSERT(rc == -ENXIO);
7482 
7483 	/* Case 3: reset is in progress. */
7484 	nvme_ctrlr->destruct = false;
7485 	nvme_ctrlr->resetting = true;
7486 
7487 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7488 	CU_ASSERT(rc == -EBUSY);
7489 
7490 	/* Case 4: disable completes successfully. */
7491 	nvme_ctrlr->resetting = false;
7492 
7493 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7494 	CU_ASSERT(rc == 0);
7495 	CU_ASSERT(nvme_ctrlr->resetting == true);
7496 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7497 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7498 
7499 	poll_thread_times(0, 3);
7500 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7501 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7502 
7503 	poll_thread_times(0, 1);
7504 	poll_thread_times(1, 1);
7505 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7506 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7507 
7508 	poll_thread_times(1, 1);
7509 	poll_thread_times(0, 1);
7510 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7511 	poll_thread_times(1, 1);
7512 	poll_thread_times(0, 1);
7513 	poll_thread_times(1, 1);
7514 	poll_thread_times(0, 1);
7515 	CU_ASSERT(nvme_ctrlr->resetting == false);
7516 	CU_ASSERT(nvme_ctrlr->disabled == true);
7517 
7518 	/* Case 5: enable completes successfully. */
7519 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7520 	CU_ASSERT(rc == 0);
7521 
7522 	CU_ASSERT(nvme_ctrlr->resetting == true);
7523 	CU_ASSERT(nvme_ctrlr->disabled == false);
7524 
7525 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7526 	poll_thread_times(0, 2);
7527 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7528 
7529 	poll_thread_times(0, 1);
7530 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7531 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7532 
7533 	poll_thread_times(1, 1);
7534 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7535 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7536 	CU_ASSERT(nvme_ctrlr->resetting == true);
7537 
7538 	poll_thread_times(0, 1);
7539 	CU_ASSERT(nvme_ctrlr->resetting == false);
7540 
7541 	/* Case 6: ctrlr is already enabled. */
7542 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7543 	CU_ASSERT(rc == -EALREADY);
7544 
7545 	set_thread(0);
7546 
7547 	/* Case 7: disable cancels delayed reconnect. */
7548 	nvme_ctrlr->opts.reconnect_delay_sec = 10;
7549 	ctrlr.fail_reset = true;
7550 
7551 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7552 	CU_ASSERT(rc == 0);
7553 
7554 	poll_threads();
7555 
7556 	CU_ASSERT(nvme_ctrlr->resetting == false);
7557 	CU_ASSERT(ctrlr.is_failed == false);
7558 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7559 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7560 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
7561 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
7562 
7563 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7564 	CU_ASSERT(rc == 0);
7565 
7566 	CU_ASSERT(nvme_ctrlr->resetting == true);
7567 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
7568 
7569 	poll_threads();
7570 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7571 	poll_threads();
7572 
7573 	CU_ASSERT(nvme_ctrlr->resetting == false);
7574 	CU_ASSERT(nvme_ctrlr->disabled == true);
7575 
7576 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7577 	CU_ASSERT(rc == 0);
7578 
7579 	CU_ASSERT(nvme_ctrlr->resetting == true);
7580 	CU_ASSERT(nvme_ctrlr->disabled == false);
7581 
7582 	poll_threads();
7583 
7584 	CU_ASSERT(nvme_ctrlr->resetting == false);
7585 
7586 	set_thread(1);
7587 
7588 	spdk_put_io_channel(ch2);
7589 
7590 	set_thread(0);
7591 
7592 	spdk_put_io_channel(ch1);
7593 
7594 	poll_threads();
7595 
7596 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7597 	CU_ASSERT(rc == 0);
7598 
7599 	poll_threads();
7600 	spdk_delay_us(1000);
7601 	poll_threads();
7602 
7603 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7604 }
7605 
7606 static void
7607 ut_delete_done(void *ctx, int rc)
7608 {
7609 	int *delete_done_rc = ctx;
7610 	*delete_done_rc = rc;
7611 }
7612 
7613 static void
7614 test_delete_ctrlr_done(void)
7615 {
7616 	struct spdk_nvme_transport_id trid = {};
7617 	struct spdk_nvme_ctrlr ctrlr = {};
7618 	int delete_done_rc = 0xDEADBEEF;
7619 	int rc;
7620 
7621 	ut_init_trid(&trid);
7622 
7623 	nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7624 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
7625 
7626 	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
7627 	CU_ASSERT(rc == 0);
7628 
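	/* Poll until ut_delete_done() overwrites the 0xDEADBEEF sentinel with the
	 * actual delete status, giving up after 20 iterations.
	 */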
7629 	for (int i = 0; i < 20; i++) {
7630 		poll_threads();
7631 		if (delete_done_rc == 0) {
7632 			break;
7633 		}
7634 		spdk_delay_us(1000);
7635 	}
7636 
7637 	CU_ASSERT(delete_done_rc == 0);
7638 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7639 }
7640 
7641 static void
7642 test_ns_remove_during_reset(void)
7643 {
7644 	struct nvme_path_id path = {};
7645 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
7646 	struct spdk_nvme_ctrlr *ctrlr;
7647 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7648 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7649 	struct nvme_ctrlr *nvme_ctrlr;
7650 	const int STRING_SIZE = 32;
7651 	const char *attached_names[STRING_SIZE];
7652 	struct nvme_bdev *nbdev;
7653 	struct nvme_ns *nvme_ns;
7654 	union spdk_nvme_async_event_completion event = {};
7655 	struct spdk_nvme_cpl cpl = {};
7656 	int rc;
7657 
7658 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
7659 	ut_init_trid(&path.trid);
7660 
7661 	set_thread(0);
7662 
7663 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
7664 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7665 
7666 	g_ut_attach_ctrlr_status = 0;
7667 	g_ut_attach_bdev_count = 1;
7668 
7669 	opts.multipath = false;
7670 
7671 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
7672 				   attach_ctrlr_done, NULL, &dopts, &opts);
7673 	CU_ASSERT(rc == 0);
7674 
7675 	spdk_delay_us(1000);
7676 	poll_threads();
7677 
7678 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7679 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7680 
7681 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
7682 	CU_ASSERT(nvme_ctrlr != NULL);
7683 
7684 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
7685 	CU_ASSERT(nbdev != NULL);
7686 
7687 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
7688 	CU_ASSERT(nvme_ns != NULL);
7689 
7690 	/* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist,
7691 	 * but nvme_ns->ns should be NULL.
7692 	 */
7693 
7694 	CU_ASSERT(ctrlr->ns[0].is_active == true);
7695 	ctrlr->ns[0].is_active = false;
7696 
7697 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7698 	CU_ASSERT(rc == 0);
7699 
7700 	poll_threads();
7701 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7702 	poll_threads();
7703 
7704 	CU_ASSERT(nvme_ctrlr->resetting == false);
7705 	CU_ASSERT(ctrlr->adminq.is_connected == true);
7706 
7707 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7708 	CU_ASSERT(nbdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7709 	CU_ASSERT(nvme_ns->bdev == nbdev);
7710 	CU_ASSERT(nvme_ns->ns == NULL);
7711 
7712 	/* Then, async event should fill nvme_ns->ns again. */
7713 	/* Then, an async event should fill nvme_ns->ns again. */
7714 	ctrlr->ns[0].is_active = true;
7715 
7716 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
7717 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
7718 	cpl.cdw0 = event.raw;
7719 
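	/* Deliver a Namespace Attribute Changed AER. The handler rescans the
	 * namespaces and should repopulate nvme_ns->ns.
	 */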
7720 	aer_cb(nvme_ctrlr, &cpl);
7721 
7722 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7723 	CU_ASSERT(nbdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7724 	CU_ASSERT(nvme_ns->bdev == nbdev);
7725 	CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]);
7726 
7727 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7728 	CU_ASSERT(rc == 0);
7729 
7730 	poll_threads();
7731 	spdk_delay_us(1000);
7732 	poll_threads();
7733 
7734 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7735 }
7736 
7737 static void
7738 test_io_path_is_current(void)
7739 {
7740 	struct nvme_bdev_channel nbdev_ch = {
7741 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
7742 	};
7743 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
7744 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
7745 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
7746 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, },
7747 	nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
7748 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {};
7749 	struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
7750 	struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
7751 	struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, };
7752 	struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, };
7753 	struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, };
7754 	struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, };
7755 	struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
7756 	struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
7757 	struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
7758 
7759 	/* io_path1 is being deleted. */
7760 	io_path1.nbdev_ch = NULL;
7761 
7762 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == false);
7763 
7764 	io_path1.nbdev_ch = &nbdev_ch;
7765 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
7766 	io_path2.nbdev_ch = &nbdev_ch;
7767 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
7768 	io_path3.nbdev_ch = &nbdev_ch;
7769 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
7770 
7771 	/* active/active: io_path is current if it is available and ANA optimized. */
7772 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7773 
7774 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7775 
7776 	/* active/active: io_path is not current if it is disconnected even if it is
7777 	 * ANA optimized.
7778 	 */
7779 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7780 
7781 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7782 
7783 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7784 
7785 	/* active/passive: io_path is current if it is available and cached.
7786 	 * (Only an ANA optimized path is cached for active/passive.)
7787 	 */
7788 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7789 	nbdev_ch.current_io_path = &io_path2;
7790 
7791 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7792 
7793 	/* active/passive: io_path is not current if it is disconnected even if it is cached. */
7794 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7795 
7796 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7797 
7798 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7799 
7800 	/* active/active and active/passive: io_path is not current if it is ANA inaccessible. */
7801 	nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
7802 
7803 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7804 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7805 
7806 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7807 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7808 
7809 	/* active/active: non-optimized path is current only if there is no optimized path. */
7810 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7811 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7812 
7813 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7814 
7815 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7816 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7817 
7818 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7819 
7820 	/* active/passive: when there is no optimized path, the first io_path is current. */
7821 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7822 	nbdev_ch.current_io_path = NULL;
7823 
7824 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == true);
7825 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7826 	CU_ASSERT(nvme_io_path_is_current(&io_path3) == false);
7827 }
7828 
7829 static void
7830 test_bdev_reset_abort_io(void)
7831 {
7832 	struct spdk_nvme_transport_id trid = {};
7833 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
7834 	struct spdk_nvme_ctrlr *ctrlr;
7835 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7836 	struct nvme_ctrlr *nvme_ctrlr;
7837 	const int STRING_SIZE = 32;
7838 	const char *attached_names[STRING_SIZE];
7839 	struct nvme_bdev *nbdev;
7840 	struct spdk_bdev_io *write_io, *read_io, *reset_io;
7841 	struct spdk_io_channel *ch1, *ch2;
7842 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
7843 	struct nvme_io_path *io_path1, *io_path2;
7844 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
7845 	int rc;
7846 
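	/* Note: bdev_retry_count == -1 retries failed I/O indefinitely, so a queued
	 * I/O stays queued until it is explicitly aborted.
	 */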
7847 	g_opts.bdev_retry_count = -1;
7848 
7849 	ut_init_trid(&trid);
7850 
7851 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
7852 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7853 
7854 	g_ut_attach_ctrlr_status = 0;
7855 	g_ut_attach_bdev_count = 1;
7856 
7857 	set_thread(1);
7858 
7859 	opts.ctrlr_loss_timeout_sec = -1;
7860 	opts.reconnect_delay_sec = 1;
7861 	opts.multipath = false;
7862 
7863 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
7864 				   attach_ctrlr_done, NULL, &dopts, &opts);
7865 	CU_ASSERT(rc == 0);
7866 
7867 	spdk_delay_us(1000);
7868 	poll_threads();
7869 
7870 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7871 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7872 
7873 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
7874 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
7875 
7876 	set_thread(0);
7877 
7878 	ch1 = spdk_get_io_channel(nbdev);
7879 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7880 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
7881 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
7882 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
7883 	nvme_qpair1 = io_path1->qpair;
7884 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
7885 
7886 	set_thread(1);
7887 
7888 	ch2 = spdk_get_io_channel(nbdev);
7889 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7890 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
7891 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
7892 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
7893 	nvme_qpair2 = io_path2->qpair;
7894 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
7895 
7896 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, nbdev, ch1);
7897 	ut_bdev_io_set_buf(write_io);
7898 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
7899 
7900 	read_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_READ, nbdev, ch1);
7901 	ut_bdev_io_set_buf(read_io);
7902 	read_io->internal.ch = (struct spdk_bdev_channel *)ch1;
7903 
7904 	reset_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, nbdev, ch2);
7905 
7906 	/* If a qpair is disconnected, it is freed and then reconnected by resetting
7907 	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is
7908 	 * resetting should be queued.
7909 	 */
7910 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7911 
7912 	poll_thread_times(0, 3);
7913 
7914 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7915 	CU_ASSERT(nvme_ctrlr->resetting == true);
7916 
7917 	set_thread(0);
7918 
7919 	write_io->internal.f.in_submit_request = true;
7920 
7921 	bdev_nvme_submit_request(ch1, write_io);
7922 
7923 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
7924 	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
7925 
7926 	set_thread(1);
7927 
7928 	/* Submit a reset request to a bdev while resetting an nvme_ctrlr.
7929 	 * Further I/O queueing should be disabled and queued I/Os should be aborted.
7930 	 * Verify these behaviors.
7931 	 */
7932 	reset_io->internal.f.in_submit_request = true;
7933 
7934 	bdev_nvme_submit_request(ch2, reset_io);
7935 
7936 	poll_thread_times(0, 1);
7937 	poll_thread_times(1, 2);
7938 
7939 	CU_ASSERT(nbdev_ch1->resetting == true);
7940 
7941 	/* qpair1 should still be disconnected. */
7942 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7943 
7944 	set_thread(0);
7945 
7946 	read_io->internal.f.in_submit_request = true;
7947 
7948 	bdev_nvme_submit_request(ch1, read_io);
7949 
7950 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7951 
7952 	poll_thread_times(0, 1);
7953 
7954 	/* The I/O which was submitted during bdev_reset should fail immediately. */
7955 	CU_ASSERT(read_io->internal.f.in_submit_request == false);
7956 	CU_ASSERT(read_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
7957 
7958 	poll_threads();
7959 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7960 	poll_threads();
7961 
7962 	/* The completion of bdev_reset should ensure queued I/O is aborted. */
7963 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
7964 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
7965 
7966 	/* The reset request itself should complete with success. */
7967 	CU_ASSERT(reset_io->internal.f.in_submit_request == false);
7968 	CU_ASSERT(reset_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
7969 
7970 	set_thread(0);
7971 
7972 	spdk_put_io_channel(ch1);
7973 
7974 	set_thread(1);
7975 
7976 	spdk_put_io_channel(ch2);
7977 
7978 	poll_threads();
7979 
7980 	set_thread(0);
7981 
7982 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7983 	CU_ASSERT(rc == 0);
7984 
7985 	poll_threads();
7986 	spdk_delay_us(1000);
7987 	poll_threads();
7988 
7989 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7990 
7991 	free(write_io);
7992 	free(read_io);
7993 	free(reset_io);
7994 
7995 	g_opts.bdev_retry_count = 0;
7996 }
7997 
7998 static void
7999 test_race_between_clear_pending_resets_and_reset_ctrlr_complete(void)
8000 {
8001 	struct nvme_path_id path = {};
8002 	struct spdk_nvme_ctrlr *ctrlr;
8003 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
8004 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
8005 	struct nvme_ctrlr *nvme_ctrlr;
8006 	const int STRING_SIZE = 32;
8007 	const char *attached_names[STRING_SIZE];
8008 	struct nvme_bdev *nbdev;
8009 	struct spdk_bdev_io *bdev_io;
8010 	struct nvme_bdev_io *bio;
8011 	struct spdk_io_channel *ch1, *ch2;
8012 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
8013 	struct nvme_io_path *io_path1, *io_path2;
8014 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
8015 	int rc;
8016 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
8017 
8018 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
8019 	bdev_opts.multipath = true;
8020 
8021 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
8022 	ut_init_trid(&path.trid);
8023 	g_ut_attach_ctrlr_status = 0;
8024 	g_ut_attach_bdev_count = 1;
8025 
8026 	set_thread(0);
8027 
8028 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, true);
8029 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
8030 
8031 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
8032 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
8033 	CU_ASSERT(rc == 0);
8034 
8035 	spdk_delay_us(1000);
8036 	poll_threads();
8037 
8038 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
8039 	poll_threads();
8040 
8041 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
8042 	poll_threads();
8043 
8044 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
8045 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
8046 
8047 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
8048 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
8049 
8050 	nbdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
8051 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
8052 
8053 	set_thread(0);
8054 
8055 	ch1 = spdk_get_io_channel(nbdev);
8056 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
8057 
8058 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
8059 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr);
8060 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
8061 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
8062 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
8063 
8064 	set_thread(1);
8065 
8066 	ch2 = spdk_get_io_channel(nbdev);
8067 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
8068 
8069 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
8070 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr);
8071 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
8072 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
8073 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
8074 
8075 	/* Internal reset request started. */
8076 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
8077 	CU_ASSERT(rc == 0);
8078 	CU_ASSERT(nvme_ctrlr->resetting == true);
8079 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
8080 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
8081 
8082 	poll_thread_times(0, 3);
8083 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
8084 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
8085 
8086 	poll_thread_times(0, 1);
8087 	poll_thread_times(1, 1);
8088 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
8089 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
8090 
8091 	poll_thread_times(1, 1);
8092 	poll_thread_times(0, 1);
8093 	CU_ASSERT(ctrlr->adminq.is_connected == false);
8094 
8095 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
8096 	poll_thread_times(0, 2);
8097 	CU_ASSERT(ctrlr->adminq.is_connected == true);
8098 
8099 	poll_thread_times(0, 1);
8100 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
8101 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
8102 
8103 	poll_thread_times(1, 1);
8104 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
8105 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
8106 	CU_ASSERT(nvme_ctrlr->resetting == true);
8107 
8108 	set_thread(0);
8109 
8110 	/* Submit an external reset request from a bdev_io just one poll before the
8111 	 * internal reset request completes.
8112 	 *
8113 	 * Previously, there was a race window between clearing the pending reset and
8114 	 * completing the reset request. If an external reset request was submitted in
8115 	 * that window, it never woke up.
8116 	 *
8117 	 * The lost-wakeup bug was fixed, so no such race window exists anymore. Hence,
8118 	 * submit the external reset request as late as possible to guard against future regressions.
8119 	 */
8120 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, nbdev, ch1);
8121 	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
8122 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
8123 
8124 	/* For simplicity, skip freezing bdev channels. */
8125 	bdev_nvme_freeze_bdev_channel_done(nbdev, bio, 0);
8126 
8127 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&nvme_ctrlr->pending_resets)) == bdev_io);
8128 
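	/* One more poll completes the internal reset. Its completion must also wake
	 * up and resubmit the queued external reset request.
	 */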
8129 	poll_thread_times(0, 1);
8130 
8131 	/* External reset request should be cleared. */
8132 	CU_ASSERT(nvme_ctrlr->resetting == false);
8133 	CU_ASSERT(TAILQ_EMPTY(&nvme_ctrlr->pending_resets));
8134 
8135 	poll_threads();
8136 
8137 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
8138 
8139 	set_thread(0);
8140 
8141 	spdk_put_io_channel(ch1);
8142 
8143 	set_thread(1);
8144 
8145 	spdk_put_io_channel(ch2);
8146 
8147 	poll_threads();
8148 
8149 	set_thread(0);
8150 
8151 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
8152 	CU_ASSERT(rc == 0);
8153 
8154 	poll_threads();
8155 	spdk_delay_us(1000);
8156 	poll_threads();
8157 
8158 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
8159 
8160 	free(bdev_io);
8161 }
8162 
8163 int
8164 main(int argc, char **argv)
8165 {
8166 	CU_pSuite	suite = NULL;
8167 	unsigned int	num_failures;
8168 
8169 	CU_initialize_registry();
8170 
8171 	suite = CU_add_suite("nvme", NULL, NULL);
8172 
8173 	CU_ADD_TEST(suite, test_create_ctrlr);
8174 	CU_ADD_TEST(suite, test_reset_ctrlr);
8175 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
8176 	CU_ADD_TEST(suite, test_failover_ctrlr);
8177 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
8178 	CU_ADD_TEST(suite, test_pending_reset);
8179 	CU_ADD_TEST(suite, test_attach_ctrlr);
8180 	CU_ADD_TEST(suite, test_aer_cb);
8181 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
8182 	CU_ADD_TEST(suite, test_add_remove_trid);
8183 	CU_ADD_TEST(suite, test_abort);
8184 	CU_ADD_TEST(suite, test_get_io_qpair);
8185 	CU_ADD_TEST(suite, test_bdev_unregister);
8186 	CU_ADD_TEST(suite, test_compare_ns);
8187 	CU_ADD_TEST(suite, test_init_ana_log_page);
8188 	CU_ADD_TEST(suite, test_get_memory_domains);
8189 	CU_ADD_TEST(suite, test_reconnect_qpair);
8190 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
8191 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
8192 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
8193 	CU_ADD_TEST(suite, test_admin_path);
8194 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
8195 	CU_ADD_TEST(suite, test_find_io_path);
8196 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
8197 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
8198 	CU_ADD_TEST(suite, test_retry_io_count);
8199 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
8200 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
8201 	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
8202 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
8203 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
8204 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
8205 	CU_ADD_TEST(suite, test_fail_path);
8206 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
8207 	CU_ADD_TEST(suite, test_ana_transition);
8208 	CU_ADD_TEST(suite, test_set_preferred_path);
8209 	CU_ADD_TEST(suite, test_find_next_io_path);
8210 	CU_ADD_TEST(suite, test_find_io_path_min_qd);
8211 	CU_ADD_TEST(suite, test_disable_auto_failback);
8212 	CU_ADD_TEST(suite, test_set_multipath_policy);
8213 	CU_ADD_TEST(suite, test_uuid_generation);
8214 	CU_ADD_TEST(suite, test_retry_io_to_same_path);
8215 	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
8216 	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
8217 	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
8218 	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
8219 	CU_ADD_TEST(suite, test_delete_ctrlr_done);
8220 	CU_ADD_TEST(suite, test_ns_remove_during_reset);
8221 	CU_ADD_TEST(suite, test_io_path_is_current);
8222 	CU_ADD_TEST(suite, test_bdev_reset_abort_io);
8223 	CU_ADD_TEST(suite, test_race_between_clear_pending_resets_and_reset_ctrlr_complete);
8224 
8225 	allocate_threads(3);
8226 	set_thread(0);
8227 	bdev_nvme_library_init();
8228 	init_accel();
8229 
8230 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
8231 
8232 	set_thread(0);
8233 	bdev_nvme_library_fini();
8234 	fini_accel();
8235 	free_threads();
8236 
8237 	CU_cleanup_registry();
8238 
8239 	return num_failures;
8240 }
8241