/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

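/* Fake io_device pointer backing the accel framework; the spdk_accel_get_io_channel()
 * stub below returns an I/O channel created on this device.
 */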
static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_numa_id, int32_t, (struct spdk_nvme_ctrlr *ctrlr),
	    SPDK_ENV_NUMA_ID_ANY);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);
DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL);
DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_nvme_scan_attached, int, (const struct spdk_nvme_transport_id *trid), 0);

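/* Mock for spdk_nvme_ctrlr_get_memory_domains(). When a test configures a positive
 * return value for this mock, the caller's array is filled with that many fake,
 * non-NULL domain pointers before the mocked value is handed back. A hypothetical
 * test snippet using SPDK's mock framework would look roughly like:
 *
 *   MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
 *   rc = spdk_nvme_ctrlr_get_memory_domains(ctrlr, domains, 1);
 *   // rc == 1 and domains[0] holds a fake non-NULL pointer
 */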
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

#define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"

static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN};

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns),
	    SPDK_NVME_16B_GUARD_PI);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
	     struct iovec *dst_iovs, uint32_t dst_iovcnt,
	     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt,
	     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
	     spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_nvme_qpair_authenticate, int,
	    (struct spdk_nvme_qpair *qpair, spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
DEFINE_STUB(spdk_nvme_ctrlr_authenticate, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
DEFINE_STUB(spdk_nvme_ctrlr_set_keys, int,
	    (struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ctrlr_key_opts *opts), 0);

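/* Minimal stand-ins for the opaque NVMe library types. Defining them here lets the
 * unit tests inspect and drive controller, namespace, and qpair state directly
 * instead of going through the real driver.
 */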
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint8_t				failure_reason;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	bool				is_removed;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

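/* Helpers that fill in three TCP transport IDs sharing a subsystem NQN but differing
 * in target address, so multipath and failover scenarios can be modeled.
 */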
static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

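/* Create a fake controller and queue it on g_ut_init_ctrlrs; it is "attached" later
 * when spdk_nvme_probe_poll_async() finds a probe context with a matching trid.
 */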
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

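/* Queue a fake request on the qpair. Requests complete only when the test polls the
 * qpair via spdk_nvme_qpair_process_completions().
 */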
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

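/* Simulate the controller completing initialization during probe: a failed ctrlr is
 * freed; otherwise the requested opts are applied, the ctrlr moves to the attached
 * list, and the attach callback fires.
 */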
static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN);
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

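/* Build a fake ANA log page with the layout:
 *
 *   [spdk_nvme_ana_page header][descriptor for ns1][descriptor for ns2]...
 *
 * Each descriptor is UT_ANA_DESC_SIZE bytes: a group descriptor followed by a single
 * NSID. The group ID is set equal to the NSID, the header's descriptor count equals
 * the controller's namespace count, and only active namespaces get a descriptor.
 */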
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_read_ext_called;
int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_read_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_write_ext_called;
int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_write_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.f.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

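/* Verify that creating a ctrlr registers it by name and that deleting it is an
 * asynchronous operation which completes only after the threads are polled.
 */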
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

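/* Walk bdev_nvme_reset_ctrlr() through its failure and success paths on two threads,
 * checking step by step that the I/O qpairs are torn down and rebuilt on both channels
 * and that resetting a removed ctrlr surfaces as a hotplug callback with an error.
 */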
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

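/* Deleting a ctrlr while a reset is in progress must be deferred until the reset
 * completes, and the ctrlr must stay alive until both I/O channels are released.
 */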
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing the ctrlr while it is being reset; the destruct is deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so the destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

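/* Exercise bdev_nvme_failover_ctrlr() first with a single trid, where failover
 * degenerates into a reset, and then with two trids, where the active path must
 * switch from trid1 to trid2.
 */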
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We hit a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr initially had trid1 and trid2, and trid1 was active. The connection to
 * trid1 was lost, and resetting the ctrlr failed repeatedly before failover from trid1
 * to trid2 started. While processing the failed reset, trid3 was added. trid1 should
 * have remained active, i.e., at the head of the list, until the failover completed.
 * However, trid3 was inserted at the head of the list by mistake.
 *
 * I/O qpairs have a smaller polling period than the admin qpair. When a connection is
 * lost, an I/O qpair may detect the error earlier than the admin qpair. An I/O qpair
 * error invokes reset ctrlr and an admin qpair error invokes failover ctrlr. Hence
 * reset ctrlr may be executed repeatedly before failover is executed, so this bug is
 * real.
 *
 * The following test verifies the fix.
 */
1809 static void
1810 test_race_between_failover_and_add_secondary_trid(void)
1811 {
1812 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1813 	struct spdk_nvme_ctrlr ctrlr = {};
1814 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1815 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1816 	struct spdk_io_channel *ch1, *ch2;
1817 	int rc;
1818 
1819 	ut_init_trid(&trid1);
1820 	ut_init_trid2(&trid2);
1821 	ut_init_trid3(&trid3);
1822 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1823 
1824 	set_thread(0);
1825 
1826 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1827 	CU_ASSERT(rc == 0);
1828 
1829 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1830 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1831 
1832 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1833 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1834 
1835 	set_thread(1);
1836 
1837 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1838 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1839 
1840 	set_thread(0);
1841 
1842 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1843 	CU_ASSERT(rc == 0);
1844 
1845 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1846 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1847 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1848 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1849 	path_id2 = TAILQ_NEXT(path_id1, link);
1850 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1851 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1852 
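	/* Make the ctrlr reset fail so that path_id1 stays the active path while the
	 * failed reset is being processed.
	 */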
1853 	ctrlr.fail_reset = true;
1854 
1855 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1856 	CU_ASSERT(rc == 0);
1857 
1858 	poll_threads();
1859 
1860 	CU_ASSERT(path_id1->last_failed_tsc != 0);
1861 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1862 
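	/* Start another reset, and add trid3 while the reset is in progress. path_id1
	 * must remain at the head of the list.
	 */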
1863 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1864 	CU_ASSERT(rc == 0);
1865 
1866 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1867 	CU_ASSERT(rc == 0);
1868 
1869 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1870 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1871 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1872 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1873 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1874 	path_id3 = TAILQ_NEXT(path_id2, link);
1875 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1876 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1877 
1878 	poll_threads();
1879 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1880 	poll_threads();
1881 
1882 	spdk_put_io_channel(ch1);
1883 
1884 	set_thread(1);
1885 
1886 	spdk_put_io_channel(ch2);
1887 
1888 	poll_threads();
1889 
1890 	set_thread(0);
1891 
1892 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1893 	CU_ASSERT(rc == 0);
1894 
1895 	poll_threads();
1896 	spdk_delay_us(1000);
1897 	poll_threads();
1898 
1899 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1900 }
1901 
1902 static void
1903 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1904 {
1905 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1906 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1907 }
1908 
1909 static void
1910 test_pending_reset(void)
1911 {
1912 	struct spdk_nvme_transport_id trid = {};
1913 	struct spdk_nvme_ctrlr *ctrlr;
1914 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
1915 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1916 	const int STRING_SIZE = 32;
1917 	const char *attached_names[STRING_SIZE];
1918 	struct nvme_bdev *bdev;
1919 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1920 	struct spdk_io_channel *ch1, *ch2;
1921 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1922 	struct nvme_io_path *io_path1, *io_path2;
1923 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1924 	int rc;
1925 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
1926 
1927 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
1928 	bdev_opts.multipath = false;
1929 
1930 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1931 	ut_init_trid(&trid);
1932 
1933 	set_thread(0);
1934 
1935 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1936 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1937 
1938 	g_ut_attach_ctrlr_status = 0;
1939 	g_ut_attach_bdev_count = 1;
1940 
1941 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1942 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
1943 	CU_ASSERT(rc == 0);
1944 
1945 	spdk_delay_us(1000);
1946 	poll_threads();
1947 
1948 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1949 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1950 
1951 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1952 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1953 
1954 	ch1 = spdk_get_io_channel(bdev);
1955 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1956 
1957 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1958 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1959 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1960 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1961 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1962 
1963 	set_thread(1);
1964 
1965 	ch2 = spdk_get_io_channel(bdev);
1966 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1967 
1968 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1969 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1970 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1971 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1972 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1973 
1974 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1975 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1976 
1977 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1978 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1979 
1980 	/* The first reset request is submitted on thread 1, and the second reset request
1981 	 * is submitted on thread 0 while processing the first request.
1982 	 */
1983 	bdev_nvme_submit_request(ch2, first_bdev_io);
1984 
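	/* Poll the threads partially so that the first reset has started but has not
	 * completed yet.
	 */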
1985 	poll_thread_times(0, 1);
1986 	poll_thread_times(1, 2);
1987 
1988 	CU_ASSERT(nvme_ctrlr->resetting == true);
1989 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1990 
1991 	set_thread(0);
1992 
1993 	bdev_nvme_submit_request(ch1, second_bdev_io);
1994 
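	/* Step the threads so that the second reset request is queued on ctrlr_ch1's
	 * pending_resets list instead of starting another reset.
	 */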
1995 	poll_thread_times(0, 1);
1996 	poll_thread_times(1, 1);
1997 	poll_thread_times(0, 2);
1998 	poll_thread_times(1, 1);
1999 	poll_thread_times(0, 1);
2000 
2001 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io);
2002 
2003 	poll_threads();
2004 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2005 	poll_threads();
2006 
2007 	CU_ASSERT(nvme_ctrlr->resetting == false);
2008 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2009 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2010 
	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the ctrlr reset fails while
	 * processing the first request. Hence both reset requests should fail.
	 */
2017 	set_thread(1);
2018 
2019 	bdev_nvme_submit_request(ch2, first_bdev_io);
2020 
2021 	poll_thread_times(0, 1);
2022 	poll_thread_times(1, 2);
2023 
2024 	CU_ASSERT(nvme_ctrlr->resetting == true);
2025 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
2026 
2027 	set_thread(0);
2028 
2029 	bdev_nvme_submit_request(ch1, second_bdev_io);
2030 
2031 	poll_thread_times(0, 1);
2032 	poll_thread_times(1, 1);
2033 	poll_thread_times(0, 2);
2034 	poll_thread_times(1, 1);
2035 	poll_thread_times(0, 1);
2036 
2037 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io);
2038 
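	/* Fail the in-progress reset so that both the first and the pending reset
	 * requests fail.
	 */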
2039 	ctrlr->fail_reset = true;
2040 
2041 	poll_threads();
2042 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2043 	poll_threads();
2044 
2045 	CU_ASSERT(nvme_ctrlr->resetting == false);
2046 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2047 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2048 
2049 	spdk_put_io_channel(ch1);
2050 
2051 	set_thread(1);
2052 
2053 	spdk_put_io_channel(ch2);
2054 
2055 	poll_threads();
2056 
2057 	set_thread(0);
2058 
2059 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2060 	CU_ASSERT(rc == 0);
2061 
2062 	poll_threads();
2063 	spdk_delay_us(1000);
2064 	poll_threads();
2065 
2066 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2067 
2068 	free(first_bdev_io);
2069 	free(second_bdev_io);
2070 }
2071 
2072 static void
2073 test_attach_ctrlr(void)
2074 {
2075 	struct spdk_nvme_transport_id trid = {};
2076 	struct spdk_nvme_ctrlr *ctrlr;
2077 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2078 	struct nvme_ctrlr *nvme_ctrlr;
2079 	const int STRING_SIZE = 32;
2080 	const char *attached_names[STRING_SIZE];
2081 	struct nvme_bdev *nbdev;
2082 	int rc;
2083 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2084 
2085 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2086 	bdev_opts.multipath = false;
2087 
2088 	set_thread(0);
2089 
2090 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2091 	ut_init_trid(&trid);
2092 
	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is removed
	 * by probe polling.
	 */
2096 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2097 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2098 
2099 	ctrlr->is_failed = true;
2100 	g_ut_attach_ctrlr_status = -EIO;
2101 	g_ut_attach_bdev_count = 0;
2102 
2103 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2104 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2105 	CU_ASSERT(rc == 0);
2106 
2107 	spdk_delay_us(1000);
2108 	poll_threads();
2109 
2110 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2111 
	/* If the ctrlr has no namespaces, one nvme_ctrlr with no namespaces is created. */
2113 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2114 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2115 
2116 	g_ut_attach_ctrlr_status = 0;
2117 
2118 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2119 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2120 	CU_ASSERT(rc == 0);
2121 
2122 	spdk_delay_us(1000);
2123 	poll_threads();
2124 
2125 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2126 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2127 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2128 
2129 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2130 	CU_ASSERT(rc == 0);
2131 
2132 	poll_threads();
2133 	spdk_delay_us(1000);
2134 	poll_threads();
2135 
2136 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2137 
	/* If the ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev are created.
	 */
2141 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2142 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2143 
2144 	g_ut_attach_bdev_count = 1;
2145 
2146 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2147 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2148 	CU_ASSERT(rc == 0);
2149 
2150 	spdk_delay_us(1000);
2151 	poll_threads();
2152 
2153 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2154 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2155 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2156 
2157 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2158 	attached_names[0] = NULL;
2159 
2160 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2161 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2162 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2163 
2164 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2165 	CU_ASSERT(rc == 0);
2166 
2167 	poll_threads();
2168 	spdk_delay_us(1000);
2169 	poll_threads();
2170 
2171 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2172 
	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespaces is
	 * created because creating the nvme_bdev failed.
	 */
2176 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2177 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2178 
2179 	g_ut_register_bdev_status = -EINVAL;
2180 	g_ut_attach_bdev_count = 0;
2181 
2182 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2183 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2184 	CU_ASSERT(rc == 0);
2185 
2186 	spdk_delay_us(1000);
2187 	poll_threads();
2188 
2189 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2190 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2191 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2192 
2193 	CU_ASSERT(attached_names[0] == NULL);
2194 
2195 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2196 	CU_ASSERT(rc == 0);
2197 
2198 	poll_threads();
2199 	spdk_delay_us(1000);
2200 	poll_threads();
2201 
2202 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2203 
2204 	g_ut_register_bdev_status = 0;
2205 }
2206 
2207 static void
2208 test_aer_cb(void)
2209 {
2210 	struct spdk_nvme_transport_id trid = {};
2211 	struct spdk_nvme_ctrlr *ctrlr;
2212 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2213 	struct nvme_ctrlr *nvme_ctrlr;
2214 	struct nvme_bdev *bdev;
2215 	const int STRING_SIZE = 32;
2216 	const char *attached_names[STRING_SIZE];
2217 	union spdk_nvme_async_event_completion event = {};
2218 	struct spdk_nvme_cpl cpl = {};
2219 	int rc;
2220 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2221 
2222 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2223 	bdev_opts.multipath = false;
2224 
2225 	set_thread(0);
2226 
2227 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2228 	ut_init_trid(&trid);
2229 
	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd, and
	 * 4th namespaces are populated.
	 */
2233 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2234 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2235 
2236 	ctrlr->ns[0].is_active = false;
2237 
2238 	g_ut_attach_ctrlr_status = 0;
2239 	g_ut_attach_bdev_count = 3;
2240 
2241 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2242 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2243 	CU_ASSERT(rc == 0);
2244 
2245 	spdk_delay_us(1000);
2246 	poll_threads();
2247 
2248 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2249 	poll_threads();
2250 
2251 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2252 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2253 
2254 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2255 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2256 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2257 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2258 
2259 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2260 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2261 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2262 
	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace, and
	 * change the size of the 4th namespace.
	 */
2266 	ctrlr->ns[0].is_active = true;
2267 	ctrlr->ns[2].is_active = false;
2268 	ctrlr->nsdata[3].nsze = 2048;
2269 
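	/* Build a Namespace Attribute Changed AER completion and deliver it to aer_cb(). */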
2270 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2271 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2272 	cpl.cdw0 = event.raw;
2273 
2274 	aer_cb(nvme_ctrlr, &cpl);
2275 
2276 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2277 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2278 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2279 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2280 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2281 
2282 	/* Change ANA state of active namespaces. */
2283 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2284 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2285 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2286 
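	/* Deliver an ANA Change AER. The new ANA states are read from the ANA log page
	 * when the admin qpair is polled.
	 */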
2287 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2288 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2289 	cpl.cdw0 = event.raw;
2290 
2291 	aer_cb(nvme_ctrlr, &cpl);
2292 
2293 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2294 	poll_threads();
2295 
2296 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2297 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2298 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2299 
2300 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2301 	CU_ASSERT(rc == 0);
2302 
2303 	poll_threads();
2304 	spdk_delay_us(1000);
2305 	poll_threads();
2306 
2307 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2308 }
2309 
2310 static void
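/* Submit a bdev I/O of the given type and verify that exactly one NVMe request is
 * generated on the qpair and that the I/O completes successfully when polled.
 */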
2311 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2312 			enum spdk_bdev_io_type io_type)
2313 {
2314 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2315 	struct nvme_io_path *io_path;
2316 	struct spdk_nvme_qpair *qpair;
2317 
2318 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2319 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2320 	qpair = io_path->qpair->qpair;
2321 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2322 
2323 	bdev_io->type = io_type;
2324 	bdev_io->internal.f.in_submit_request = true;
2325 
2326 	bdev_nvme_submit_request(ch, bdev_io);
2327 
2328 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2329 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2330 
2331 	poll_threads();
2332 
2333 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2334 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2335 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2336 }
2337 
2338 static void
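/* Submit a bdev I/O of the given type and verify that it completes immediately
 * without generating any NVMe request on the qpair.
 */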
2339 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2340 		   enum spdk_bdev_io_type io_type)
2341 {
2342 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2343 	struct nvme_io_path *io_path;
2344 	struct spdk_nvme_qpair *qpair;
2345 
2346 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2347 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2348 	qpair = io_path->qpair->qpair;
2349 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2350 
2351 	bdev_io->type = io_type;
2352 	bdev_io->internal.f.in_submit_request = true;
2353 
2354 	bdev_nvme_submit_request(ch, bdev_io);
2355 
2356 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2357 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2358 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2359 }
2360 
2361 static void
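/* Submit a fused compare-and-write and verify that it generates two NVMe requests,
 * with the compare submitted first, and completes as a single bdev I/O.
 */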
2362 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2363 {
2364 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2365 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2366 	struct ut_nvme_req *req;
2367 	struct nvme_io_path *io_path;
2368 	struct spdk_nvme_qpair *qpair;
2369 
2370 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2371 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2372 	qpair = io_path->qpair->qpair;
2373 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2374 
	/* Only the fused compare-and-write command is supported for now. */
2376 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2377 	bdev_io->internal.f.in_submit_request = true;
2378 
2379 	bdev_nvme_submit_request(ch, bdev_io);
2380 
2381 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2382 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2383 	CU_ASSERT(bio->first_fused_submitted == true);
2384 
2385 	/* First outstanding request is compare operation. */
2386 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2387 	SPDK_CU_ASSERT_FATAL(req != NULL);
2388 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
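	/* Store the opcode in cdw0 so that the completion callback can distinguish the
	 * compare completion from the write completion of the fused command.
	 */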
2389 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2390 
2391 	poll_threads();
2392 
2393 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2394 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2395 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2396 }
2397 
2398 static void
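/* Submit an admin passthrough command and verify that it is processed by the admin
 * qpair poller on the ctrlr's thread and then completed back on the submitting thread.
 */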
2399 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2400 			 struct spdk_nvme_ctrlr *ctrlr)
2401 {
2402 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2403 	bdev_io->internal.f.in_submit_request = true;
2404 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2405 
2406 	bdev_nvme_submit_request(ch, bdev_io);
2407 
2408 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2409 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2410 
2411 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2412 	poll_thread_times(1, 1);
2413 
2414 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2415 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2416 
2417 	poll_thread_times(0, 1);
2418 
2419 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2420 }
2421 
2422 static void
2423 test_submit_nvme_cmd(void)
2424 {
2425 	struct spdk_nvme_transport_id trid = {};
2426 	struct spdk_nvme_ctrlr *ctrlr;
2427 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2428 	struct nvme_ctrlr *nvme_ctrlr;
2429 	const int STRING_SIZE = 32;
2430 	const char *attached_names[STRING_SIZE];
2431 	struct nvme_bdev *bdev;
2432 	struct spdk_bdev_io *bdev_io;
2433 	struct spdk_io_channel *ch;
2434 	int rc;
2435 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2436 
2437 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2438 	bdev_opts.multipath = false;
2439 
2440 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2441 	ut_init_trid(&trid);
2442 
2443 	set_thread(1);
2444 
2445 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2446 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2447 
2448 	g_ut_attach_ctrlr_status = 0;
2449 	g_ut_attach_bdev_count = 1;
2450 
2451 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2452 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2453 	CU_ASSERT(rc == 0);
2454 
2455 	spdk_delay_us(1000);
2456 	poll_threads();
2457 
2458 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2459 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2460 
2461 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2462 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2463 
2464 	set_thread(0);
2465 
2466 	ch = spdk_get_io_channel(bdev);
2467 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2468 
2469 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2470 
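	/* First exercise a read whose bdev_io has no iovec set, then set a data buffer
	 * and exercise each supported I/O type.
	 */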
2471 	bdev_io->u.bdev.iovs = NULL;
2472 
2473 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2474 
2475 	ut_bdev_io_set_buf(bdev_io);
2476 
2477 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2478 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2479 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2480 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2481 
2482 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2483 
2484 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2485 
	/* Verify that the ext NVMe API is called when data is described by a memory domain. */
2487 	g_ut_read_ext_called = false;
2488 	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
2489 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2490 	CU_ASSERT(g_ut_read_ext_called == true);
2491 	g_ut_read_ext_called = false;
2492 	bdev_io->u.bdev.memory_domain = NULL;
2493 
2494 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2495 
2496 	free(bdev_io);
2497 
2498 	spdk_put_io_channel(ch);
2499 
2500 	poll_threads();
2501 
2502 	set_thread(1);
2503 
2504 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2505 	CU_ASSERT(rc == 0);
2506 
2507 	poll_threads();
2508 	spdk_delay_us(1000);
2509 	poll_threads();
2510 
2511 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2512 }
2513 
2514 static void
2515 test_add_remove_trid(void)
2516 {
2517 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2518 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2519 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2520 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2521 	const int STRING_SIZE = 32;
2522 	const char *attached_names[STRING_SIZE];
2523 	struct nvme_path_id *ctrid;
2524 	int rc;
2525 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2526 
2527 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2528 	bdev_opts.multipath = false;
2529 
2530 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2531 	ut_init_trid(&path1.trid);
2532 	ut_init_trid2(&path2.trid);
2533 	ut_init_trid3(&path3.trid);
2534 
2535 	set_thread(0);
2536 
2537 	g_ut_attach_ctrlr_status = 0;
2538 	g_ut_attach_bdev_count = 0;
2539 
2540 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2541 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2542 
2543 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2544 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2545 	CU_ASSERT(rc == 0);
2546 
2547 	spdk_delay_us(1000);
2548 	poll_threads();
2549 
2550 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2551 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2552 
2553 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2554 
2555 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2556 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2557 
2558 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2559 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2560 	CU_ASSERT(rc == 0);
2561 
2562 	spdk_delay_us(1000);
2563 	poll_threads();
2564 
2565 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2566 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2567 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2568 			break;
2569 		}
2570 	}
2571 	CU_ASSERT(ctrid != NULL);
2572 
2573 	/* trid3 is not in the registered list. */
2574 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2575 	CU_ASSERT(rc == -ENXIO);
2576 
	/* trid2 is not in use and is simply removed. */
2578 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2579 	CU_ASSERT(rc == 0);
2580 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2581 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2582 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2583 	}
2584 
2585 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2586 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2587 
2588 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2589 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2590 	CU_ASSERT(rc == 0);
2591 
2592 	spdk_delay_us(1000);
2593 	poll_threads();
2594 
2595 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2596 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2597 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2598 			break;
2599 		}
2600 	}
2601 	CU_ASSERT(ctrid != NULL);
2602 
	/* Mark path3 as failed by forcefully setting its last_failed_tsc to a non-zero
	 * value. If we add path2 again, path2 should be inserted between path1 and path3.
	 * Then, we remove path2. It is not in use and is simply removed.
	 */
2607 	ctrid->last_failed_tsc = spdk_get_ticks() + 1;
2608 
2609 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2610 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2611 
2612 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2613 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2614 	CU_ASSERT(rc == 0);
2615 
2616 	spdk_delay_us(1000);
2617 	poll_threads();
2618 
2619 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2620 
2621 	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
2622 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2623 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);
2624 
2625 	ctrid = TAILQ_NEXT(ctrid, link);
2626 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2627 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);
2628 
2629 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2630 	CU_ASSERT(rc == 0);
2631 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2632 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2633 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2634 	}
2635 
	/* path1 is currently in use and path3 is an alternative path.
	 * If we remove path1, the active path is changed to path3.
	 */
2639 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
2640 	CU_ASSERT(rc == 0);
2641 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2642 	CU_ASSERT(nvme_ctrlr->resetting == true);
2643 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2644 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2645 	}
2646 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2647 
2648 	poll_threads();
2649 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2650 	poll_threads();
2651 
2652 	CU_ASSERT(nvme_ctrlr->resetting == false);
2653 
2654 	/* path3 is the current and only path. If we remove path3, the corresponding
2655 	 * nvme_ctrlr is removed.
2656 	 */
2657 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2658 	CU_ASSERT(rc == 0);
2659 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2660 
2661 	poll_threads();
2662 	spdk_delay_us(1000);
2663 	poll_threads();
2664 
2665 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2666 
2667 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2668 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2669 
2670 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2671 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2672 	CU_ASSERT(rc == 0);
2673 
2674 	spdk_delay_us(1000);
2675 	poll_threads();
2676 
2677 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2678 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2679 
2680 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2681 
2682 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2683 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2684 
2685 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2686 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2687 	CU_ASSERT(rc == 0);
2688 
2689 	spdk_delay_us(1000);
2690 	poll_threads();
2691 
2692 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2693 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2694 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2695 			break;
2696 		}
2697 	}
2698 	CU_ASSERT(ctrid != NULL);
2699 
	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2701 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2702 	CU_ASSERT(rc == 0);
2703 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2704 
2705 	poll_threads();
2706 	spdk_delay_us(1000);
2707 	poll_threads();
2708 
2709 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2710 }
2711 
2712 static void
2713 test_abort(void)
2714 {
2715 	struct spdk_nvme_transport_id trid = {};
2716 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
2717 	struct spdk_nvme_ctrlr *ctrlr;
2718 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
2719 	struct nvme_ctrlr *nvme_ctrlr;
2720 	const int STRING_SIZE = 32;
2721 	const char *attached_names[STRING_SIZE];
2722 	struct nvme_bdev *bdev;
2723 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2724 	struct spdk_io_channel *ch1, *ch2;
2725 	struct nvme_bdev_channel *nbdev_ch1;
2726 	struct nvme_io_path *io_path1;
2727 	struct nvme_qpair *nvme_qpair1;
2728 	int rc;
2729 
	/* Create the ctrlr on thread 1 and submit the I/O and admin requests to be aborted
	 * on thread 0. Abort requests for I/O are submitted on thread 0, and abort requests
	 * for admin commands are submitted on thread 1. Both should succeed.
	 */
2734 
2735 	ut_init_trid(&trid);
2736 
2737 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2738 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2739 
2740 	g_ut_attach_ctrlr_status = 0;
2741 	g_ut_attach_bdev_count = 1;
2742 
2743 	set_thread(1);
2744 
2745 	opts.ctrlr_loss_timeout_sec = -1;
2746 	opts.reconnect_delay_sec = 1;
2747 	opts.multipath = false;
2748 
2749 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2750 				   attach_ctrlr_done, NULL, &dopts, &opts);
2751 	CU_ASSERT(rc == 0);
2752 
2753 	spdk_delay_us(1000);
2754 	poll_threads();
2755 
2756 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2757 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2758 
2759 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2760 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2761 
2762 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2763 	ut_bdev_io_set_buf(write_io);
2764 
2765 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2766 	ut_bdev_io_set_buf(fuse_io);
2767 
2768 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2769 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2770 
2771 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2772 
2773 	set_thread(0);
2774 
2775 	ch1 = spdk_get_io_channel(bdev);
2776 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2777 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2778 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2779 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2780 	nvme_qpair1 = io_path1->qpair;
2781 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2782 
2783 	set_thread(1);
2784 
2785 	ch2 = spdk_get_io_channel(bdev);
2786 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2787 
2788 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2789 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2790 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2791 
2792 	/* Aborting the already completed request should fail. */
2793 	write_io->internal.f.in_submit_request = true;
2794 	bdev_nvme_submit_request(ch1, write_io);
2795 	poll_threads();
2796 
2797 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2798 
2799 	abort_io->u.abort.bio_to_abort = write_io;
2800 	abort_io->internal.f.in_submit_request = true;
2801 
2802 	bdev_nvme_submit_request(ch1, abort_io);
2803 
2804 	poll_threads();
2805 
2806 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2807 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2808 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2809 
2810 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2811 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2812 
2813 	admin_io->internal.f.in_submit_request = true;
2814 	bdev_nvme_submit_request(ch1, admin_io);
2815 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2816 	poll_threads();
2817 
2818 	CU_ASSERT(admin_io->internal.f.in_submit_request == false);
2819 
2820 	abort_io->u.abort.bio_to_abort = admin_io;
2821 	abort_io->internal.f.in_submit_request = true;
2822 
2823 	bdev_nvme_submit_request(ch2, abort_io);
2824 
2825 	poll_threads();
2826 
2827 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2828 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2829 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2830 
2831 	/* Aborting the write request should succeed. */
2832 	write_io->internal.f.in_submit_request = true;
2833 	bdev_nvme_submit_request(ch1, write_io);
2834 
2835 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
2836 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2837 
2838 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2839 	abort_io->u.abort.bio_to_abort = write_io;
2840 	abort_io->internal.f.in_submit_request = true;
2841 
2842 	bdev_nvme_submit_request(ch1, abort_io);
2843 
2844 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2845 	poll_threads();
2846 
2847 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2848 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2849 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2850 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2851 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2852 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2853 
	/* Aborting the fused request should succeed. */
2855 	fuse_io->internal.f.in_submit_request = true;
2856 	bdev_nvme_submit_request(ch1, fuse_io);
2857 
2858 	CU_ASSERT(fuse_io->internal.f.in_submit_request == true);
2859 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2860 
2861 	abort_io->u.abort.bio_to_abort = fuse_io;
2862 	abort_io->internal.f.in_submit_request = true;
2863 
2864 	bdev_nvme_submit_request(ch1, abort_io);
2865 
2866 	spdk_delay_us(10000);
2867 	poll_threads();
2868 
2869 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2870 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2871 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2872 	CU_ASSERT(fuse_io->internal.f.in_submit_request == false);
2873 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2874 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2875 
2876 	/* Aborting the admin request should succeed. */
2877 	admin_io->internal.f.in_submit_request = true;
2878 	bdev_nvme_submit_request(ch1, admin_io);
2879 
2880 	CU_ASSERT(admin_io->internal.f.in_submit_request == true);
2881 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2882 
2883 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2884 	abort_io->u.abort.bio_to_abort = admin_io;
2885 	abort_io->internal.f.in_submit_request = true;
2886 
2887 	bdev_nvme_submit_request(ch2, abort_io);
2888 
2889 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2890 	poll_threads();
2891 
2892 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2893 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2894 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2895 	CU_ASSERT(admin_io->internal.f.in_submit_request == false);
2896 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2897 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2898 
2899 	set_thread(0);
2900 
	/* If the qpair is disconnected, it is freed and then reconnected by resetting
	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is resetting
	 * should be queued.
	 */
2905 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2906 
2907 	poll_thread_times(0, 3);
2908 
2909 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2910 	CU_ASSERT(nvme_ctrlr->resetting == true);
2911 
2912 	write_io->internal.f.in_submit_request = true;
2913 
2914 	bdev_nvme_submit_request(ch1, write_io);
2915 
2916 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
2917 	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
2918 
2919 	/* Aborting the queued write request should succeed immediately. */
2920 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2921 	abort_io->u.abort.bio_to_abort = write_io;
2922 	abort_io->internal.f.in_submit_request = true;
2923 
2924 	bdev_nvme_submit_request(ch1, abort_io);
2925 
2926 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2927 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2928 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2929 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2930 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2931 
2932 	poll_threads();
2933 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2934 	poll_threads();
2935 
2936 	spdk_put_io_channel(ch1);
2937 
2938 	set_thread(1);
2939 
2940 	spdk_put_io_channel(ch2);
2941 
2942 	poll_threads();
2943 
2944 	free(write_io);
2945 	free(fuse_io);
2946 	free(admin_io);
2947 	free(abort_io);
2948 
2949 	set_thread(1);
2950 
2951 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2952 	CU_ASSERT(rc == 0);
2953 
2954 	poll_threads();
2955 	spdk_delay_us(1000);
2956 	poll_threads();
2957 
2958 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2959 }
2960 
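/* Verify that bdev_nvme_get_io_qpair() returns the I/O qpair held by the
 * nvme_ctrlr's I/O channel.
 */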
2961 static void
2962 test_get_io_qpair(void)
2963 {
2964 	struct spdk_nvme_transport_id trid = {};
2965 	struct spdk_nvme_ctrlr ctrlr = {};
2966 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2967 	struct spdk_io_channel *ch;
2968 	struct nvme_ctrlr_channel *ctrlr_ch;
2969 	struct spdk_nvme_qpair *qpair;
2970 	int rc;
2971 
2972 	ut_init_trid(&trid);
2973 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2974 
2975 	set_thread(0);
2976 
2977 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2978 	CU_ASSERT(rc == 0);
2979 
2980 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2981 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2982 
2983 	ch = spdk_get_io_channel(nvme_ctrlr);
2984 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2985 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2986 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2987 
2988 	qpair = bdev_nvme_get_io_qpair(ch);
2989 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2990 
2991 	spdk_put_io_channel(ch);
2992 
2993 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2994 	CU_ASSERT(rc == 0);
2995 
2996 	poll_threads();
2997 	spdk_delay_us(1000);
2998 	poll_threads();
2999 
3000 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3001 }
3002 
/* Test a scenario in which the bdev subsystem starts shutting down while NVMe bdevs
 * still exist. In this scenario, spdk_bdev_unregister() is called first. This test
 * case guards against a regression in that scenario. spdk_bdev_unregister() eventually
 * calls bdev_nvme_destruct(), so call bdev_nvme_destruct() directly here.
 */
3008 static void
3009 test_bdev_unregister(void)
3010 {
3011 	struct spdk_nvme_transport_id trid = {};
3012 	struct spdk_nvme_ctrlr *ctrlr;
3013 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3014 	struct nvme_ctrlr *nvme_ctrlr;
3015 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3016 	const int STRING_SIZE = 32;
3017 	const char *attached_names[STRING_SIZE];
3018 	struct nvme_bdev *bdev1, *bdev2;
3019 	int rc;
3020 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3021 
3022 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3023 	bdev_opts.multipath = false;
3024 
3025 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3026 	ut_init_trid(&trid);
3027 
3028 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
3029 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3030 
3031 	g_ut_attach_ctrlr_status = 0;
3032 	g_ut_attach_bdev_count = 2;
3033 
3034 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3035 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3036 	CU_ASSERT(rc == 0);
3037 
3038 	spdk_delay_us(1000);
3039 	poll_threads();
3040 
3041 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3042 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3043 
3044 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
3045 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3046 
3047 	bdev1 = nvme_ns1->bdev;
3048 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3049 
3050 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
3051 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3052 
3053 	bdev2 = nvme_ns2->bdev;
3054 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3055 
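	/* Destruct both bdevs directly, as spdk_bdev_unregister() eventually would. */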
3056 	bdev_nvme_destruct(&bdev1->disk);
3057 	bdev_nvme_destruct(&bdev2->disk);
3058 
3059 	poll_threads();
3060 
3061 	CU_ASSERT(nvme_ns1->bdev == NULL);
3062 	CU_ASSERT(nvme_ns2->bdev == NULL);
3063 
3064 	nvme_ctrlr->destruct = true;
3065 	_nvme_ctrlr_destruct(nvme_ctrlr);
3066 
3067 	poll_threads();
3068 	spdk_delay_us(1000);
3069 	poll_threads();
3070 
3071 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3072 }
3073 
3074 static void
3075 test_compare_ns(void)
3076 {
3077 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
3078 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
3079 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
3080 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
3081 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
3082 
3083 	/* No IDs are defined. */
3084 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3085 
	/* Only EUI64s are defined and they do not match. */
3087 	nsdata1.eui64 = 0xABCDEF0123456789;
3088 	nsdata2.eui64 = 0xBBCDEF0123456789;
3089 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3090 
	/* Only EUI64s are defined and they match. */
3092 	nsdata2.eui64 = 0xABCDEF0123456789;
3093 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3094 
	/* Only NGUIDs are defined and they do not match. */
3096 	nsdata1.eui64 = 0x0;
3097 	nsdata2.eui64 = 0x0;
3098 	nsdata1.nguid[0] = 0x12;
3099 	nsdata2.nguid[0] = 0x10;
3100 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3101 
	/* Only NGUIDs are defined and they match. */
3103 	nsdata2.nguid[0] = 0x12;
3104 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3105 
	/* Only UUIDs are defined and they do not match. */
3107 	nsdata1.nguid[0] = 0x0;
3108 	nsdata2.nguid[0] = 0x0;
3109 	ns1.uuid = &uuid1;
3110 	ns2.uuid = &uuid2;
3111 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3112 
3113 	/* Only one UUID is defined. */
3114 	ns1.uuid = NULL;
3115 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3116 
	/* Only UUIDs are defined and they match. */
3118 	ns1.uuid = &uuid2;
3119 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3120 
	/* EUI64, NGUID, and UUID are all defined and match. */
3122 	nsdata1.eui64 = 0x123456789ABCDEF;
3123 	nsdata2.eui64 = 0x123456789ABCDEF;
3124 	nsdata1.nguid[15] = 0x34;
3125 	nsdata2.nguid[15] = 0x34;
3126 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3127 
	/* The CSI values do not match. */
3129 	ns1.csi = SPDK_NVME_CSI_ZNS;
3130 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3131 }
3132 
3133 static void
3134 test_init_ana_log_page(void)
3135 {
3136 	struct spdk_nvme_transport_id trid = {};
3137 	struct spdk_nvme_ctrlr *ctrlr;
3138 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3139 	struct nvme_ctrlr *nvme_ctrlr;
3140 	const int STRING_SIZE = 32;
3141 	const char *attached_names[STRING_SIZE];
3142 	int rc;
3143 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3144 
3145 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3146 	bdev_opts.multipath = false;
3147 
3148 	set_thread(0);
3149 
3150 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3151 	ut_init_trid(&trid);
3152 
3153 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
3154 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3155 
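	/* Give each of the five namespaces a distinct ANA state. These states should be
	 * reflected once the ANA log page is read during attach.
	 */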
3156 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3157 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3158 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3159 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3160 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3161 
3162 	g_ut_attach_ctrlr_status = 0;
3163 	g_ut_attach_bdev_count = 5;
3164 
3165 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3166 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3167 	CU_ASSERT(rc == 0);
3168 
3169 	spdk_delay_us(1000);
3170 	poll_threads();
3171 
3172 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3173 	poll_threads();
3174 
3175 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3176 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3177 
3178 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
3179 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
3180 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
3181 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
3182 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
3183 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3184 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3185 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3186 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
3187 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
3188 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
3189 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
3190 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
3191 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
3192 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
3193 
3194 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3195 	CU_ASSERT(rc == 0);
3196 
3197 	poll_threads();
3198 	spdk_delay_us(1000);
3199 	poll_threads();
3200 
3201 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3202 }
3203 
3204 static void
3205 init_accel(void)
3206 {
3207 	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3208 				sizeof(int), "accel_p");
3209 }
3210 
3211 static void
3212 fini_accel(void)
3213 {
3214 	spdk_io_device_unregister(g_accel_p, NULL);
3215 }
3216 
3217 static void
3218 test_get_memory_domains(void)
3219 {
3220 	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3221 	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3222 	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3223 	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3224 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3225 	struct spdk_memory_domain *domains[4] = {};
3226 	int rc = 0;
3227 
3228 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3229 
3230 	/* nvme controller doesn't have memory domains */
3231 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3232 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3233 	CU_ASSERT(rc == 0);
3234 	CU_ASSERT(domains[0] == NULL);
3235 	CU_ASSERT(domains[1] == NULL);
3236 
3237 	/* nvme controller has a memory domain */
3238 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3239 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3240 	CU_ASSERT(rc == 1);
3241 	CU_ASSERT(domains[0] != NULL);
3242 	memset(domains, 0, sizeof(domains));
3243 
3244 	/* multipath, 2 controllers report 1 memory domain each */
3245 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3246 
3247 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3248 	CU_ASSERT(rc == 2);
3249 	CU_ASSERT(domains[0] != NULL);
3250 	CU_ASSERT(domains[1] != NULL);
3251 	memset(domains, 0, sizeof(domains));
3252 
3253 	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3254 	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3255 	CU_ASSERT(rc == 2);
3256 
3257 	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3258 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3259 	CU_ASSERT(rc == 2);
3260 	CU_ASSERT(domains[0] == NULL);
3261 	CU_ASSERT(domains[1] == NULL);
3262 
3263 	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3264 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3265 	CU_ASSERT(rc == 2);
3266 	CU_ASSERT(domains[0] != NULL);
3267 	CU_ASSERT(domains[1] == NULL);
3268 	memset(domains, 0, sizeof(domains));
3269 
	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3271 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3272 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3273 	CU_ASSERT(rc == 4);
3274 	CU_ASSERT(domains[0] != NULL);
3275 	CU_ASSERT(domains[1] != NULL);
3276 	CU_ASSERT(domains[2] != NULL);
3277 	CU_ASSERT(domains[3] != NULL);
3278 	memset(domains, 0, sizeof(domains));
3279 
	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test).
	 * The array size is less than the number of memory domains. */
3282 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3283 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3284 	CU_ASSERT(rc == 4);
3285 	CU_ASSERT(domains[0] != NULL);
3286 	CU_ASSERT(domains[1] != NULL);
3287 	CU_ASSERT(domains[2] != NULL);
3288 	CU_ASSERT(domains[3] == NULL);
3289 	memset(domains, 0, sizeof(domains));
3290 
3291 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3292 }
3293 
3294 static void
3295 test_reconnect_qpair(void)
3296 {
3297 	struct spdk_nvme_transport_id trid = {};
3298 	struct spdk_nvme_ctrlr *ctrlr;
3299 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3300 	struct nvme_ctrlr *nvme_ctrlr;
3301 	const int STRING_SIZE = 32;
3302 	const char *attached_names[STRING_SIZE];
3303 	struct nvme_bdev *bdev;
3304 	struct spdk_io_channel *ch1, *ch2;
3305 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3306 	struct nvme_io_path *io_path1, *io_path2;
3307 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3308 	int rc;
3309 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3310 
3311 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3312 	bdev_opts.multipath = false;
3313 
3314 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3315 	ut_init_trid(&trid);
3316 
3317 	set_thread(0);
3318 
3319 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3320 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3321 
3322 	g_ut_attach_ctrlr_status = 0;
3323 	g_ut_attach_bdev_count = 1;
3324 
3325 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3326 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3327 	CU_ASSERT(rc == 0);
3328 
3329 	spdk_delay_us(1000);
3330 	poll_threads();
3331 
3332 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3333 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3334 
3335 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3336 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3337 
3338 	ch1 = spdk_get_io_channel(bdev);
3339 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3340 
3341 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3342 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3343 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3344 	nvme_qpair1 = io_path1->qpair;
3345 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3346 
3347 	set_thread(1);
3348 
3349 	ch2 = spdk_get_io_channel(bdev);
3350 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3351 
3352 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3353 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3354 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3355 	nvme_qpair2 = io_path2->qpair;
3356 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3357 
	/* If a qpair is disconnected, it is freed and then reconnected by resetting
	 * the corresponding nvme_ctrlr.
	 */
3361 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3362 	ctrlr->is_failed = true;
3363 
3364 	poll_thread_times(1, 3);
3365 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3366 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3367 	CU_ASSERT(nvme_ctrlr->resetting == true);
3368 
3369 	poll_thread_times(0, 3);
3370 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3371 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3372 	CU_ASSERT(ctrlr->is_failed == true);
3373 
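	/* Continue the reset. The ctrlr's failed state is cleared and its admin qpair is
	 * disconnected before being reconnected.
	 */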
3374 	poll_thread_times(1, 2);
3375 	poll_thread_times(0, 1);
3376 	CU_ASSERT(ctrlr->is_failed == false);
3377 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3378 
3379 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3380 	poll_thread_times(0, 2);
3381 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3382 
3383 	poll_thread_times(0, 1);
3384 	poll_thread_times(1, 1);
3385 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3386 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3387 	CU_ASSERT(nvme_ctrlr->resetting == true);
3388 
3389 	poll_thread_times(0, 2);
3390 	poll_thread_times(1, 1);
3391 	poll_thread_times(0, 1);
3392 	CU_ASSERT(nvme_ctrlr->resetting == false);
3393 
3394 	poll_threads();
3395 
3396 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3397 	 * fails, the qpair is just freed.
3398 	 */
3399 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3400 	ctrlr->is_failed = true;
3401 	ctrlr->fail_reset = true;
3402 
3403 	poll_thread_times(1, 3);
3404 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3405 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3406 	CU_ASSERT(nvme_ctrlr->resetting == true);
3407 
3408 	poll_thread_times(0, 3);
3409 	poll_thread_times(1, 1);
3410 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3411 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3412 	CU_ASSERT(ctrlr->is_failed == true);
3413 
3414 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3415 	poll_thread_times(0, 3);
3416 	poll_thread_times(1, 1);
3417 	poll_thread_times(0, 1);
3418 	CU_ASSERT(ctrlr->is_failed == true);
3419 	CU_ASSERT(nvme_ctrlr->resetting == false);
3420 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3421 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3422 
3423 	poll_threads();
3424 
3425 	spdk_put_io_channel(ch2);
3426 
3427 	set_thread(0);
3428 
3429 	spdk_put_io_channel(ch1);
3430 
3431 	poll_threads();
3432 
3433 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3434 	CU_ASSERT(rc == 0);
3435 
3436 	poll_threads();
3437 	spdk_delay_us(1000);
3438 	poll_threads();
3439 
3440 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3441 }
3442 
3443 static void
3444 test_create_bdev_ctrlr(void)
3445 {
3446 	struct nvme_path_id path1 = {}, path2 = {};
3447 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3448 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3449 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3450 	const int STRING_SIZE = 32;
3451 	const char *attached_names[STRING_SIZE];
3452 	int rc;
3453 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3454 
3455 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3456 	bdev_opts.multipath = true;
3457 
3458 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3459 	ut_init_trid(&path1.trid);
3460 	ut_init_trid2(&path2.trid);
3461 
3462 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3463 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3464 
3465 	g_ut_attach_ctrlr_status = 0;
3466 	g_ut_attach_bdev_count = 0;
3467 
3468 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3469 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3470 	CU_ASSERT(rc == 0);
3471 
3472 	spdk_delay_us(1000);
3473 	poll_threads();
3474 
3475 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3476 	poll_threads();
3477 
3478 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3479 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3480 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3481 
3482 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3483 	g_ut_attach_ctrlr_status = -EINVAL;
3484 
3485 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3486 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3487 
3488 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3489 
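	/* The create call itself still returns 0. The attach completes
	 * asynchronously and fails (g_ut_attach_ctrlr_status is -EINVAL), which
	 * is observed below as the second ctrlr not being added.
	 */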
3490 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3491 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3492 	CU_ASSERT(rc == 0);
3493 
3494 	spdk_delay_us(1000);
3495 	poll_threads();
3496 
3497 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3498 	poll_threads();
3499 
3500 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3501 
3502 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3503 	g_ut_attach_ctrlr_status = 0;
3504 
3505 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3506 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3507 
3508 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3509 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3510 	CU_ASSERT(rc == 0);
3511 
3512 	spdk_delay_us(1000);
3513 	poll_threads();
3514 
3515 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3516 	poll_threads();
3517 
3518 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3519 
3520 	/* Delete two ctrlrs at once. */
3521 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3522 	CU_ASSERT(rc == 0);
3523 
3524 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3525 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3526 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3527 
3528 	poll_threads();
3529 	spdk_delay_us(1000);
3530 	poll_threads();
3531 
3532 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3533 
3534 	/* Add two ctrlrs and delete them one by one. */
3535 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3536 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3537 
3538 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3539 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3540 
3541 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3542 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3543 	CU_ASSERT(rc == 0);
3544 
3545 	spdk_delay_us(1000);
3546 	poll_threads();
3547 
3548 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3549 	poll_threads();
3550 
3551 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3552 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3553 	CU_ASSERT(rc == 0);
3554 
3555 	spdk_delay_us(1000);
3556 	poll_threads();
3557 
3558 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3559 	poll_threads();
3560 
3561 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3562 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3563 
3564 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3565 	CU_ASSERT(rc == 0);
3566 
3567 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3568 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3569 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3570 
3571 	poll_threads();
3572 	spdk_delay_us(1000);
3573 	poll_threads();
3574 
3575 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3576 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3577 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3578 
3579 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3580 	CU_ASSERT(rc == 0);
3581 
3582 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3583 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3584 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3585 
3586 	poll_threads();
3587 	spdk_delay_us(1000);
3588 	poll_threads();
3589 
3590 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3591 }
3592 
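/* Test helper: return the nvme_ns of the given nvme_bdev that belongs to the
 * given nvme_ctrlr, or NULL if the ctrlr does not back this bdev.
 */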
3593 static struct nvme_ns *
3594 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3595 {
3596 	struct nvme_ns *nvme_ns;
3597 
3598 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3599 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3600 			return nvme_ns;
3601 		}
3602 	}
3603 
3604 	return NULL;
3605 }
3606 
3607 static void
3608 test_add_multi_ns_to_bdev(void)
3609 {
3610 	struct nvme_path_id path1 = {}, path2 = {};
3611 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3612 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3613 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3614 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3615 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3616 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3617 	const int STRING_SIZE = 32;
3618 	const char *attached_names[STRING_SIZE];
3619 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3620 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3621 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3622 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3623 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3624 	int rc;
3625 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3626 
3627 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3628 	bdev_opts.multipath = true;
3629 
3630 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3631 	ut_init_trid(&path1.trid);
3632 	ut_init_trid2(&path2.trid);
3633 
3634 	/* Create nvme_bdevs, some of which are backed by namespaces shared between the two ctrlrs. */
3635 
3636 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st,
3637 	 * 3rd, and 4th namespaces are populated.
3638 	 */
3639 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3640 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3641 
3642 	ctrlr1->ns[1].is_active = false;
3643 	ctrlr1->ns[4].is_active = false;
3644 	ctrlr1->ns[0].uuid = &uuid1;
3645 	ctrlr1->ns[2].uuid = &uuid3;
3646 	ctrlr1->ns[3].uuid = &uuid4;
3647 
3648 	g_ut_attach_ctrlr_status = 0;
3649 	g_ut_attach_bdev_count = 3;
3650 
3651 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3652 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3653 	CU_ASSERT(rc == 0);
3654 
3655 	spdk_delay_us(1000);
3656 	poll_threads();
3657 
3658 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3659 	poll_threads();
3660 
3661 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st,
3662 	 * 2nd, and 4th namespaces are populated. The UUID of the 4th namespace is
3663 	 * different, hence adding the 4th namespace to the existing bdev should fail.
3664 	 */
3665 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3666 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3667 
3668 	ctrlr2->ns[2].is_active = false;
3669 	ctrlr2->ns[4].is_active = false;
3670 	ctrlr2->ns[0].uuid = &uuid1;
3671 	ctrlr2->ns[1].uuid = &uuid2;
3672 	ctrlr2->ns[3].uuid = &uuid44;
3673 
3674 	g_ut_attach_ctrlr_status = 0;
3675 	g_ut_attach_bdev_count = 2;
3676 
3677 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3678 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3679 	CU_ASSERT(rc == 0);
3680 
3681 	spdk_delay_us(1000);
3682 	poll_threads();
3683 
3684 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3685 	poll_threads();
3686 
3687 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3688 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3689 
3690 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3691 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3692 
3693 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3694 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3695 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3696 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3697 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3698 
3699 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3700 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3701 
3702 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3703 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3704 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3705 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3706 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3707 
3708 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3709 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3710 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3711 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3712 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3713 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3714 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3715 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3716 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3717 
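	/* bdev1 is backed by a namespace on both ctrlrs (both use uuid1), so it
	 * holds two references. The other bdevs are single-path.
	 */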
3718 	CU_ASSERT(bdev1->ref == 2);
3719 	CU_ASSERT(bdev2->ref == 1);
3720 	CU_ASSERT(bdev3->ref == 1);
3721 	CU_ASSERT(bdev4->ref == 1);
3722 
3723 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3724 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3725 	CU_ASSERT(rc == 0);
3726 
3727 	poll_threads();
3728 	spdk_delay_us(1000);
3729 	poll_threads();
3730 
3731 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3732 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3733 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2);
3734 
3735 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3736 	CU_ASSERT(rc == 0);
3737 
3738 	poll_threads();
3739 	spdk_delay_us(1000);
3740 	poll_threads();
3741 
3742 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3743 
3744 	/* Test if an nvme_bdev which has a namespace shared between two ctrlrs
3745 	 * can be deleted when the bdev subsystem shuts down.
3746 	 */
3747 	g_ut_attach_bdev_count = 1;
3748 
3749 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3750 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3751 
3752 	ctrlr1->ns[0].uuid = &uuid1;
3753 
3754 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3755 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3756 	CU_ASSERT(rc == 0);
3757 
3758 	spdk_delay_us(1000);
3759 	poll_threads();
3760 
3761 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3762 	poll_threads();
3763 
3764 	ut_init_trid2(&path2.trid);
3765 
3766 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3767 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3768 
3769 	ctrlr2->ns[0].uuid = &uuid1;
3770 
3771 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3772 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3773 	CU_ASSERT(rc == 0);
3774 
3775 	spdk_delay_us(1000);
3776 	poll_threads();
3777 
3778 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3779 	poll_threads();
3780 
3781 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3782 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3783 
3784 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3785 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3786 
3787 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3788 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3789 
3790 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3791 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3792 
3793 	/* Check that the nvme_bdev has two nvme_ns, one from each ctrlr. */
3794 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3795 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3796 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3797 
3798 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3799 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3800 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3801 
3802 	/* As on bdev subsystem shutdown, delete the nvme_bdev first. */
3803 	bdev_nvme_destruct(&bdev1->disk);
3804 
3805 	poll_threads();
3806 
3807 	CU_ASSERT(nvme_ns1->bdev == NULL);
3808 	CU_ASSERT(nvme_ns2->bdev == NULL);
3809 
3810 	nvme_ctrlr1->destruct = true;
3811 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3812 
3813 	poll_threads();
3814 	spdk_delay_us(1000);
3815 	poll_threads();
3816 
3817 	nvme_ctrlr2->destruct = true;
3818 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3819 
3820 	poll_threads();
3821 	spdk_delay_us(1000);
3822 	poll_threads();
3823 
3824 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3825 }
3826 
3827 static void
3828 test_add_multi_io_paths_to_nbdev_ch(void)
3829 {
3830 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3831 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3832 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3833 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3834 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3835 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3836 	const int STRING_SIZE = 32;
3837 	const char *attached_names[STRING_SIZE];
3838 	struct nvme_bdev *bdev;
3839 	struct spdk_io_channel *ch;
3840 	struct nvme_bdev_channel *nbdev_ch;
3841 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3842 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3843 	int rc;
3844 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3845 
3846 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3847 	bdev_opts.multipath = true;
3848 
3849 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3850 	ut_init_trid(&path1.trid);
3851 	ut_init_trid2(&path2.trid);
3852 	ut_init_trid3(&path3.trid);
3853 	g_ut_attach_ctrlr_status = 0;
3854 	g_ut_attach_bdev_count = 1;
3855 
3856 	set_thread(1);
3857 
3858 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3859 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3860 
3861 	ctrlr1->ns[0].uuid = &uuid1;
3862 
3863 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3864 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3865 	CU_ASSERT(rc == 0);
3866 
3867 	spdk_delay_us(1000);
3868 	poll_threads();
3869 
3870 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3871 	poll_threads();
3872 
3873 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3874 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3875 
3876 	ctrlr2->ns[0].uuid = &uuid1;
3877 
3878 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3879 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3880 	CU_ASSERT(rc == 0);
3881 
3882 	spdk_delay_us(1000);
3883 	poll_threads();
3884 
3885 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3886 	poll_threads();
3887 
3888 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3889 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3890 
3891 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3892 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3893 
3894 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3895 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3896 
3897 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3898 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3899 
3900 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3901 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3902 
3903 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3904 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3905 
3906 	set_thread(0);
3907 
3908 	ch = spdk_get_io_channel(bdev);
3909 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3910 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3911 
3912 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3913 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3914 
3915 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3916 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3917 
3918 	set_thread(1);
3919 
3920 	/* Check if an I/O path is dynamically added to the nvme_bdev_channel. */
3921 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3922 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3923 
3924 	ctrlr3->ns[0].uuid = &uuid1;
3925 
3926 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3927 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3928 	CU_ASSERT(rc == 0);
3929 
3930 	spdk_delay_us(1000);
3931 	poll_threads();
3932 
3933 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3934 	poll_threads();
3935 
3936 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn);
3937 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3938 
3939 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3940 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3941 
3942 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3943 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3944 
3945 	/* Check if an I/O path is dynamically deleted from the nvme_bdev_channel. */
3946 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3947 	CU_ASSERT(rc == 0);
3948 
3949 	poll_threads();
3950 	spdk_delay_us(1000);
3951 	poll_threads();
3952 
3953 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1);
3954 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3955 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3);
3956 
3957 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3958 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3959 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3960 
3961 	set_thread(0);
3962 
3963 	spdk_put_io_channel(ch);
3964 
3965 	poll_threads();
3966 
3967 	set_thread(1);
3968 
3969 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3970 	CU_ASSERT(rc == 0);
3971 
3972 	poll_threads();
3973 	spdk_delay_us(1000);
3974 	poll_threads();
3975 
3976 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3977 }
3978 
3979 static void
3980 test_admin_path(void)
3981 {
3982 	struct nvme_path_id path1 = {}, path2 = {};
3983 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3984 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3985 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3986 	const int STRING_SIZE = 32;
3987 	const char *attached_names[STRING_SIZE];
3988 	struct nvme_bdev *bdev;
3989 	struct spdk_io_channel *ch;
3990 	struct spdk_bdev_io *bdev_io;
3991 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3992 	int rc;
3993 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3994 
3995 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3996 	bdev_opts.multipath = true;
3997 
3998 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3999 	ut_init_trid(&path1.trid);
4000 	ut_init_trid2(&path2.trid);
4001 	g_ut_attach_ctrlr_status = 0;
4002 	g_ut_attach_bdev_count = 1;
4003 
4004 	set_thread(0);
4005 
4006 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4007 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4008 
4009 	ctrlr1->ns[0].uuid = &uuid1;
4010 
4011 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4012 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4013 	CU_ASSERT(rc == 0);
4014 
4015 	spdk_delay_us(1000);
4016 	poll_threads();
4017 
4018 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4019 	poll_threads();
4020 
4021 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4022 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4023 
4024 	ctrlr2->ns[0].uuid = &uuid1;
4025 
4026 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4027 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4028 	CU_ASSERT(rc == 0);
4029 
4030 	spdk_delay_us(1000);
4031 	poll_threads();
4032 
4033 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4034 	poll_threads();
4035 
4036 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4037 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4038 
4039 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4040 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4041 
4042 	ch = spdk_get_io_channel(bdev);
4043 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4044 
4045 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
4046 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
4047 
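	/* The Get Features opcode is incidental. What is under test is which
	 * ctrlr's admin qpair the command is routed to.
	 */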
4048 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
4049 	 * submitted to ctrlr2.
4050 	 */
4051 	ctrlr1->is_failed = true;
4052 	bdev_io->internal.f.in_submit_request = true;
4053 
4054 	bdev_nvme_submit_request(ch, bdev_io);
4055 
4056 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4057 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
4058 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4059 
4060 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4061 	poll_threads();
4062 
4063 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4064 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4065 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4066 
4067 	/* Both ctrlr1 and ctrlr2 are failed. Submitting the admin command should fail. */
4068 	ctrlr2->is_failed = true;
4069 	bdev_io->internal.f.in_submit_request = true;
4070 
4071 	bdev_nvme_submit_request(ch, bdev_io);
4072 
4073 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4074 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4075 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4076 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4077 
4078 	free(bdev_io);
4079 
4080 	spdk_put_io_channel(ch);
4081 
4082 	poll_threads();
4083 
4084 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4085 	CU_ASSERT(rc == 0);
4086 
4087 	poll_threads();
4088 	spdk_delay_us(1000);
4089 	poll_threads();
4090 
4091 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4092 }
4093 
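/* Test helper: return the io_path in the given nvme_bdev_channel whose qpair
 * belongs to the given nvme_ctrlr, or NULL if there is none.
 */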
4094 static struct nvme_io_path *
4095 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
4096 			struct nvme_ctrlr *nvme_ctrlr)
4097 {
4098 	struct nvme_io_path *io_path;
4099 
4100 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
4101 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
4102 			return io_path;
4103 		}
4104 	}
4105 
4106 	return NULL;
4107 }
4108 
4109 static void
4110 test_reset_bdev_ctrlr(void)
4111 {
4112 	struct nvme_path_id path1 = {}, path2 = {};
4113 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4114 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4115 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4116 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4117 	struct nvme_path_id *curr_path1, *curr_path2;
4118 	const int STRING_SIZE = 32;
4119 	const char *attached_names[STRING_SIZE];
4120 	struct nvme_bdev *bdev;
4121 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
4122 	struct nvme_bdev_io *first_bio;
4123 	struct spdk_io_channel *ch1, *ch2;
4124 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
4125 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
4126 	int rc;
4127 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4128 
4129 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4130 	bdev_opts.multipath = true;
4131 
4132 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4133 	ut_init_trid(&path1.trid);
4134 	ut_init_trid2(&path2.trid);
4135 	g_ut_attach_ctrlr_status = 0;
4136 	g_ut_attach_bdev_count = 1;
4137 
4138 	set_thread(0);
4139 
4140 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4141 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4142 
4143 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4144 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4145 	CU_ASSERT(rc == 0);
4146 
4147 	spdk_delay_us(1000);
4148 	poll_threads();
4149 
4150 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4151 	poll_threads();
4152 
4153 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4154 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4155 
4156 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4157 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4158 	CU_ASSERT(rc == 0);
4159 
4160 	spdk_delay_us(1000);
4161 	poll_threads();
4162 
4163 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4164 	poll_threads();
4165 
4166 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4167 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4168 
4169 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4170 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
4171 
4172 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
4173 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
4174 
4175 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4176 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
4177 
4178 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
4179 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
4180 
4181 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4182 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4183 
4184 	set_thread(0);
4185 
4186 	ch1 = spdk_get_io_channel(bdev);
4187 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
4188 
4189 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
4190 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
4191 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
4192 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
4193 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
4194 
4195 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
4196 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
4197 
4198 	set_thread(1);
4199 
4200 	ch2 = spdk_get_io_channel(bdev);
4201 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
4202 
4203 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
4204 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
4205 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
4206 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
4207 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
4208 
4209 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
4210 
4211 	/* The first reset request from bdev_io is submitted on thread 0.
4212 	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
4213 	 *
4214 	 * A few extra polls are necessary after resetting ctrlr1 to check
4215 	 * pending reset requests for ctrlr1.
4216 	 */
4217 	ctrlr1->is_failed = true;
4218 	curr_path1->last_failed_tsc = spdk_get_ticks();
4219 	ctrlr2->is_failed = true;
4220 	curr_path2->last_failed_tsc = spdk_get_ticks();
4221 
4222 	set_thread(0);
4223 
4224 	bdev_nvme_submit_request(ch1, first_bdev_io);
4225 
4226 	poll_thread_times(0, 1);
4227 	poll_thread_times(1, 1);
4228 	poll_thread_times(0, 2);
4229 	poll_thread_times(1, 1);
4230 	poll_thread_times(0, 1);
4231 
4232 	CU_ASSERT(first_bio->io_path == io_path11);
4233 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4234 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4235 
4236 	poll_thread_times(0, 3);
4237 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4238 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4239 
4240 	poll_thread_times(1, 2);
4241 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4242 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4243 	CU_ASSERT(ctrlr1->is_failed == true);
4244 
4245 	poll_thread_times(0, 1);
4246 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4247 	CU_ASSERT(ctrlr1->is_failed == false);
4248 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4249 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4250 
4251 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4252 	poll_thread_times(0, 2);
4253 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4254 
4255 	poll_thread_times(0, 1);
4256 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4257 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4258 
4259 	poll_thread_times(1, 1);
4260 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4261 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4262 
4263 	poll_thread_times(0, 2);
4264 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4265 	poll_thread_times(1, 1);
4266 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4267 	poll_thread_times(0, 2);
4268 	CU_ASSERT(nvme_ctrlr1->resetting == false);
4269 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4270 	CU_ASSERT(first_bio->io_path == io_path12);
4271 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4272 
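	/* The reset bio has moved on to ctrlr2. The same disconnect/reconnect
	 * sequence as for ctrlr1 repeats for io_path12 and io_path22.
	 */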
4273 	poll_thread_times(0, 3);
4274 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4275 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4276 
4277 	poll_thread_times(1, 2);
4278 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4279 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4280 	CU_ASSERT(ctrlr2->is_failed == true);
4281 
4282 	poll_thread_times(0, 1);
4283 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4284 	CU_ASSERT(ctrlr2->is_failed == false);
4285 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4286 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4287 
4288 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4289 	poll_thread_times(0, 2);
4290 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4291 
4292 	poll_thread_times(0, 1);
4293 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4294 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4295 
4296 	poll_thread_times(1, 2);
4297 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4298 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4299 
4300 	poll_thread_times(0, 2);
4301 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4302 	poll_thread_times(1, 1);
4303 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4304 	poll_thread_times(0, 2);
4305 	CU_ASSERT(first_bio->io_path == NULL);
4306 	CU_ASSERT(nvme_ctrlr2->resetting == false);
4307 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4308 
4309 	poll_threads();
4310 
4311 	/* There is a race between two reset requests from bdev_io.
4312 	 *
4313 	 * The first reset request is submitted on thread 0, and the second reset
4314 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4315 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
4316 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
4317 	 * The second is pending on ctrlr2 again. After the first completes resetting
4318 	 * ctrlr2, both complete successfully.
4319 	 */
4320 	ctrlr1->is_failed = true;
4321 	curr_path1->last_failed_tsc = spdk_get_ticks();
4322 	ctrlr2->is_failed = true;
4323 	curr_path2->last_failed_tsc = spdk_get_ticks();
4324 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4325 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4326 
4327 	set_thread(0);
4328 
4329 	bdev_nvme_submit_request(ch1, first_bdev_io);
4330 
4331 	set_thread(1);
4332 
4333 	bdev_nvme_submit_request(ch2, second_bdev_io);
4334 
4335 	poll_thread_times(0, 1);
4336 	poll_thread_times(1, 1);
4337 	poll_thread_times(0, 2);
4338 	poll_thread_times(1, 1);
4339 	poll_thread_times(0, 1);
4340 	poll_thread_times(1, 1);
4341 
4342 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4343 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4344 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4345 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4346 
4347 	poll_threads();
4348 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4349 	poll_threads();
4350 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4351 	poll_threads();
4352 
4353 	CU_ASSERT(ctrlr1->is_failed == false);
4354 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4355 	CU_ASSERT(ctrlr2->is_failed == false);
4356 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4357 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4358 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4359 
4360 	/* Reset of the first path succeeds, reset of the second path fails.
4361 	 * Since we have at least one working path, the RESET I/O should not fail.
4362 	 */
4363 	ctrlr1->is_failed = true;
4364 	curr_path1->last_failed_tsc = spdk_get_ticks();
4365 	ctrlr2->is_failed = true;
4366 	curr_path2->last_failed_tsc = spdk_get_ticks();
4367 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4368 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4369 
4370 	set_thread(0);
4371 	bdev_nvme_submit_request(ch1, first_bdev_io);
4372 
4373 	set_thread(1);
4374 	bdev_nvme_submit_request(ch2, second_bdev_io);
4375 
4376 	poll_thread_times(0, 1);
4377 	poll_thread_times(1, 1);
4378 	poll_thread_times(0, 2);
4379 	poll_thread_times(1, 1);
4380 	poll_thread_times(0, 1);
4381 	poll_thread_times(1, 1);
4382 
4383 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4384 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4385 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4386 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4387 
4388 	ctrlr2->fail_reset = true;
4389 
4390 	poll_threads();
4391 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4392 	poll_threads();
4393 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4394 	poll_threads();
4395 
4396 	CU_ASSERT(ctrlr1->is_failed == false);
4397 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4398 	CU_ASSERT(ctrlr2->is_failed == true);
4399 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4400 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4401 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4402 
4403 	/* Path 2 recovers */
4404 	ctrlr2->fail_reset = false;
4405 	poll_threads();
4406 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4407 	poll_threads();
4408 
4409 	CU_ASSERT(ctrlr2->is_failed == false);
4410 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4411 
4412 	/* Reset of the first path fails, reset of the second path succeeds.
4413 	 * Since we have at least one working path, the RESET I/O should not fail.
4414 	 */
4415 	ctrlr1->is_failed = true;
4416 	curr_path1->last_failed_tsc = spdk_get_ticks();
4417 	ctrlr2->is_failed = true;
4418 	curr_path2->last_failed_tsc = spdk_get_ticks();
4419 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4420 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4421 
4422 	set_thread(0);
4423 	bdev_nvme_submit_request(ch1, first_bdev_io);
4424 
4425 	set_thread(1);
4426 	bdev_nvme_submit_request(ch2, second_bdev_io);
4427 
4428 	poll_thread_times(0, 1);
4429 	poll_thread_times(1, 1);
4430 	poll_thread_times(0, 2);
4431 	poll_thread_times(1, 1);
4432 	poll_thread_times(0, 1);
4433 	poll_thread_times(1, 1);
4434 
4435 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4436 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4437 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4438 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4439 
4440 	ctrlr1->fail_reset = true;
4441 
4442 	poll_threads();
4443 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4444 	poll_threads();
4445 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4446 	poll_threads();
4447 
4448 	CU_ASSERT(ctrlr1->is_failed == true);
4449 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4450 	CU_ASSERT(ctrlr2->is_failed == false);
4451 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4452 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4453 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4454 
4455 	/* Path 1 recovers */
4456 	ctrlr1->fail_reset = false;
4457 	poll_threads();
4458 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4459 	poll_threads();
4460 
4461 	CU_ASSERT(ctrlr1->is_failed == false);
4462 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4463 
4464 	/* Resets of both paths fail.
4465 	 * Since we have no working paths, the RESET I/O should fail.
4466 	 */
4467 	ctrlr1->is_failed = true;
4468 	curr_path1->last_failed_tsc = spdk_get_ticks();
4469 	ctrlr2->is_failed = true;
4470 	curr_path2->last_failed_tsc = spdk_get_ticks();
4471 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4472 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4473 
4474 	set_thread(0);
4475 	bdev_nvme_submit_request(ch1, first_bdev_io);
4476 
4477 	set_thread(1);
4478 	bdev_nvme_submit_request(ch2, second_bdev_io);
4479 
4480 	poll_thread_times(0, 1);
4481 	poll_thread_times(1, 1);
4482 	poll_thread_times(0, 2);
4483 	poll_thread_times(1, 1);
4484 	poll_thread_times(0, 1);
4485 	poll_thread_times(1, 1);
4486 
4487 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4488 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4489 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4490 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4491 
4492 	ctrlr1->fail_reset = true;
4493 	ctrlr2->fail_reset = true;
4494 
4495 	poll_threads();
4496 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4497 	poll_threads();
4498 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4499 	poll_threads();
4500 
4501 	CU_ASSERT(ctrlr1->is_failed == true);
4502 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4503 	CU_ASSERT(ctrlr2->is_failed == true);
4504 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4505 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4506 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4507 
4508 	/* Paths 1 and 2 recover */
4509 	ctrlr1->fail_reset = false;
4510 	ctrlr2->fail_reset = false;
4511 	poll_threads();
4512 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4513 	poll_threads();
4514 
4515 	CU_ASSERT(ctrlr1->is_failed == false);
4516 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4517 	CU_ASSERT(ctrlr2->is_failed == false);
4518 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4519 
4520 	set_thread(0);
4521 
4522 	spdk_put_io_channel(ch1);
4523 
4524 	set_thread(1);
4525 
4526 	spdk_put_io_channel(ch2);
4527 
4528 	poll_threads();
4529 
4530 	set_thread(0);
4531 
4532 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4533 	CU_ASSERT(rc == 0);
4534 
4535 	poll_threads();
4536 	spdk_delay_us(1000);
4537 	poll_threads();
4538 
4539 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4540 
4541 	free(first_bdev_io);
4542 	free(second_bdev_io);
4543 }
4544 
4545 static void
4546 test_find_io_path(void)
4547 {
4548 	struct nvme_bdev_channel nbdev_ch = {
4549 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4550 	};
4551 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4552 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4553 	struct spdk_nvme_ns ns1 = {}, ns2 = {};
4554 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4555 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4556 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4557 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4558 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, };
4559 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4560 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4561 
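	/* The channel fixture is assembled by hand; in this test
	 * bdev_nvme_find_io_path() is driven only by each path's qpair pointer
	 * and its namespace's ANA state.
	 */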
4562 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4563 
4564 	/* Test if io_path whose ANA state is not accessible is excluded. */
4565 
4566 	nvme_qpair1.qpair = &qpair1;
4567 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4568 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4569 
4570 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4571 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4572 
4573 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4574 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4575 
4576 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4577 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4578 
4579 	nbdev_ch.current_io_path = NULL;
4580 
4581 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4582 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4583 
4584 	nbdev_ch.current_io_path = NULL;
4585 
4586 	/* Test if io_path whose qpair is resetting is excluded. */
4587 
4588 	nvme_qpair1.qpair = NULL;
4589 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4590 
4591 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4592 
4593 	/* Test that a path in ANA optimized state is preferred and that,
4594 	 * otherwise, the first found path in ANA non-optimized state is used.
4595 	 */
4596 
4597 	nvme_qpair1.qpair = &qpair1;
4598 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4599 	nvme_qpair2.qpair = &qpair2;
4600 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4601 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4602 
4603 	nbdev_ch.current_io_path = NULL;
4604 
4605 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4606 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4607 
4608 	nbdev_ch.current_io_path = NULL;
4609 }
4610 
4611 static void
4612 test_retry_io_if_ana_state_is_updating(void)
4613 {
4614 	struct nvme_path_id path = {};
4615 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
4616 	struct spdk_nvme_ctrlr *ctrlr;
4617 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
4618 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4619 	struct nvme_ctrlr *nvme_ctrlr;
4620 	const int STRING_SIZE = 32;
4621 	const char *attached_names[STRING_SIZE];
4622 	struct nvme_bdev *bdev;
4623 	struct nvme_ns *nvme_ns;
4624 	struct spdk_bdev_io *bdev_io1;
4625 	struct spdk_io_channel *ch;
4626 	struct nvme_bdev_channel *nbdev_ch;
4627 	struct nvme_io_path *io_path;
4628 	struct nvme_qpair *nvme_qpair;
4629 	int rc;
4630 
4631 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4632 	ut_init_trid(&path.trid);
4633 
4634 	set_thread(0);
4635 
4636 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4637 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4638 
4639 	g_ut_attach_ctrlr_status = 0;
4640 	g_ut_attach_bdev_count = 1;
4641 
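	/* ctrlr_loss_timeout_sec of -1 means the ctrlr is never given up on, and
	 * reconnect attempts are spaced reconnect_delay_sec (1 second) apart.
	 */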
4642 	opts.ctrlr_loss_timeout_sec = -1;
4643 	opts.reconnect_delay_sec = 1;
4644 	opts.multipath = false;
4645 
4646 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4647 				   attach_ctrlr_done, NULL, &dopts, &opts);
4648 	CU_ASSERT(rc == 0);
4649 
4650 	spdk_delay_us(1000);
4651 	poll_threads();
4652 
4653 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4654 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4655 
4656 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
4657 	CU_ASSERT(nvme_ctrlr != NULL);
4658 
4659 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4660 	CU_ASSERT(bdev != NULL);
4661 
4662 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4663 	CU_ASSERT(nvme_ns != NULL);
4664 
4665 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4666 	ut_bdev_io_set_buf(bdev_io1);
4667 
4668 	ch = spdk_get_io_channel(bdev);
4669 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4670 
4671 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4672 
4673 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4674 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4675 
4676 	nvme_qpair = io_path->qpair;
4677 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4678 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4679 
4680 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4681 
4682 	/* If the qpair is connected, the I/O should succeed. */
4683 	bdev_io1->internal.f.in_submit_request = true;
4684 
4685 	bdev_nvme_submit_request(ch, bdev_io1);
4686 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4687 
4688 	poll_threads();
4689 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
4690 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4691 
4692 	/* If the ANA state of the namespace is inaccessible, the I/O should be queued. */
4693 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4694 	nbdev_ch->current_io_path = NULL;
4695 
4696 	bdev_io1->internal.f.in_submit_request = true;
4697 
4698 	bdev_nvme_submit_request(ch, bdev_io1);
4699 
4700 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4701 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4702 	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4703 
4704 	/* ANA state became accessible while I/O was queued. */
4705 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4706 
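	/* Advance the clock so that the queued retry becomes due. A single poll
	 * then resubmits the I/O to the now-accessible namespace.
	 */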
4707 	spdk_delay_us(1000000);
4708 
4709 	poll_thread_times(0, 1);
4710 
4711 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4712 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4713 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4714 
4715 	poll_threads();
4716 
4717 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4718 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
4719 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4720 
4721 	free(bdev_io1);
4722 
4723 	spdk_put_io_channel(ch);
4724 
4725 	poll_threads();
4726 
4727 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4728 	CU_ASSERT(rc == 0);
4729 
4730 	poll_threads();
4731 	spdk_delay_us(1000);
4732 	poll_threads();
4733 
4734 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4735 }
4736 
4737 static void
4738 test_retry_io_for_io_path_error(void)
4739 {
4740 	struct nvme_path_id path1 = {}, path2 = {};
4741 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4742 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4743 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4744 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4745 	const int STRING_SIZE = 32;
4746 	const char *attached_names[STRING_SIZE];
4747 	struct nvme_bdev *bdev;
4748 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4749 	struct spdk_bdev_io *bdev_io;
4750 	struct nvme_bdev_io *bio;
4751 	struct spdk_io_channel *ch;
4752 	struct nvme_bdev_channel *nbdev_ch;
4753 	struct nvme_io_path *io_path1, *io_path2;
4754 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4755 	struct ut_nvme_req *req;
4756 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4757 	int rc;
4758 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4759 
4760 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4761 	bdev_opts.multipath = true;
4762 
4763 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4764 	ut_init_trid(&path1.trid);
4765 	ut_init_trid2(&path2.trid);
4766 
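	/* Allow each failed I/O to be retried at most once. */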
4767 	g_opts.bdev_retry_count = 1;
4768 
4769 	set_thread(0);
4770 
4771 	g_ut_attach_ctrlr_status = 0;
4772 	g_ut_attach_bdev_count = 1;
4773 
4774 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4775 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4776 
4777 	ctrlr1->ns[0].uuid = &uuid1;
4778 
4779 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4780 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4781 	CU_ASSERT(rc == 0);
4782 
4783 	spdk_delay_us(1000);
4784 	poll_threads();
4785 
4786 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4787 	poll_threads();
4788 
4789 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4790 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4791 
4792 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4793 	CU_ASSERT(nvme_ctrlr1 != NULL);
4794 
4795 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4796 	CU_ASSERT(bdev != NULL);
4797 
4798 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4799 	CU_ASSERT(nvme_ns1 != NULL);
4800 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4801 
4802 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4803 	ut_bdev_io_set_buf(bdev_io);
4804 
4805 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4806 
4807 	ch = spdk_get_io_channel(bdev);
4808 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4809 
4810 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4811 
4812 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4813 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4814 
4815 	nvme_qpair1 = io_path1->qpair;
4816 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4817 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4818 
4819 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4820 
4821 	/* I/O got a temporary I/O path error, but it should not be retried if DNR is set. */
4822 	bdev_io->internal.f.in_submit_request = true;
4823 
4824 	bdev_nvme_submit_request(ch, bdev_io);
4825 
4826 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4827 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4828 
4829 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4830 	SPDK_CU_ASSERT_FATAL(req != NULL);
4831 
4832 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4833 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4834 	req->cpl.status.dnr = 1;
4835 
4836 	poll_thread_times(0, 1);
4837 
4838 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4839 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4840 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4841 
4842 	/* I/O got a temporary I/O path error, but it should succeed after retry. */
4843 	bdev_io->internal.f.in_submit_request = true;
4844 
4845 	bdev_nvme_submit_request(ch, bdev_io);
4846 
4847 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4848 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4849 
4850 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4851 	SPDK_CU_ASSERT_FATAL(req != NULL);
4852 
4853 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4854 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4855 
4856 	poll_thread_times(0, 1);
4857 
4858 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4859 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4860 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4861 
4862 	poll_threads();
4863 
4864 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4865 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4866 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4867 
4868 	/* Add io_path2 dynamically, and create a multipath configuration. */
4869 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4870 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4871 
4872 	ctrlr2->ns[0].uuid = &uuid1;
4873 
4874 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4875 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4876 	CU_ASSERT(rc == 0);
4877 
4878 	spdk_delay_us(1000);
4879 	poll_threads();
4880 
4881 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4882 	poll_threads();
4883 
4884 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4885 	CU_ASSERT(nvme_ctrlr2 != NULL);
4886 
4887 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4888 	CU_ASSERT(nvme_ns2 != NULL);
4889 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4890 
4891 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4892 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4893 
4894 	nvme_qpair2 = io_path2->qpair;
4895 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4896 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4897 
4898 	/* I/O is submitted to io_path1, but io_path1's qpair is disconnected and
4899 	 * deleted, so the I/O is aborted. io_path2 is still available, hence
4900 	 * after a retry the I/O is submitted to io_path2 and should succeed.
4901 	 */
4902 	bdev_io->internal.f.in_submit_request = true;
4903 
4904 	bdev_nvme_submit_request(ch, bdev_io);
4905 
4906 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4907 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4908 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4909 
4910 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4911 	SPDK_CU_ASSERT_FATAL(req != NULL);
4912 
4913 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4914 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4915 
4916 	poll_thread_times(0, 1);
4917 
4918 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4919 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4920 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4921 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4922 
4923 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4924 	nvme_qpair1->qpair = NULL;
4925 
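	/* With io_path1's qpair deleted, the retried I/O must be routed to
	 * io_path2.
	 */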
4926 	poll_threads();
4927 
4928 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4929 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4930 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4931 
4932 	free(bdev_io);
4933 
4934 	spdk_put_io_channel(ch);
4935 
4936 	poll_threads();
4937 
4938 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4939 	CU_ASSERT(rc == 0);
4940 
4941 	poll_threads();
4942 	spdk_delay_us(1000);
4943 	poll_threads();
4944 
4945 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4946 
4947 	g_opts.bdev_retry_count = 0;
4948 }
4949 
4950 static void
4951 test_retry_io_count(void)
4952 {
4953 	struct nvme_path_id path = {};
4954 	struct spdk_nvme_ctrlr *ctrlr;
4955 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4956 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4957 	struct nvme_ctrlr *nvme_ctrlr;
4958 	const int STRING_SIZE = 32;
4959 	const char *attached_names[STRING_SIZE];
4960 	struct nvme_bdev *bdev;
4961 	struct nvme_ns *nvme_ns;
4962 	struct spdk_bdev_io *bdev_io;
4963 	struct nvme_bdev_io *bio;
4964 	struct spdk_io_channel *ch;
4965 	struct nvme_bdev_channel *nbdev_ch;
4966 	struct nvme_io_path *io_path;
4967 	struct nvme_qpair *nvme_qpair;
4968 	struct ut_nvme_req *req;
4969 	int rc;
4970 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4971 
4972 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4973 	bdev_opts.multipath = false;
4974 
4975 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4976 	ut_init_trid(&path.trid);
4977 
4978 	set_thread(0);
4979 
4980 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4981 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4982 
4983 	g_ut_attach_ctrlr_status = 0;
4984 	g_ut_attach_bdev_count = 1;
4985 
4986 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4987 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4988 	CU_ASSERT(rc == 0);
4989 
4990 	spdk_delay_us(1000);
4991 	poll_threads();
4992 
4993 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4994 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4995 
4996 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
4997 	CU_ASSERT(nvme_ctrlr != NULL);
4998 
4999 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5000 	CU_ASSERT(bdev != NULL);
5001 
5002 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5003 	CU_ASSERT(nvme_ns != NULL);
5004 
5005 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5006 	ut_bdev_io_set_buf(bdev_io);
5007 
5008 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5009 
5010 	ch = spdk_get_io_channel(bdev);
5011 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5012 
5013 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5014 
5015 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5016 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5017 
5018 	nvme_qpair = io_path->qpair;
5019 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5020 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5021 
5022 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5023 
5024 	/* If I/O is aborted by request, it should not be retried. */
5025 	g_opts.bdev_retry_count = 1;
5026 
5027 	bdev_io->internal.f.in_submit_request = true;
5028 
5029 	bdev_nvme_submit_request(ch, bdev_io);
5030 
5031 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5032 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5033 
5034 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5035 	SPDK_CU_ASSERT_FATAL(req != NULL);
5036 
5037 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
5038 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5039 
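	/* A single poll completes the outstanding request with the injected
	 * completion status.
	 */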
5040 	poll_thread_times(0, 1);
5041 
5042 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5043 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5044 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
5045 
5046 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
5047 	 * the failed I/O should not be retried.
5048 	 */
5049 	g_opts.bdev_retry_count = 4;
5050 
5051 	bdev_io->internal.f.in_submit_request = true;
5052 
5053 	bdev_nvme_submit_request(ch, bdev_io);
5054 
5055 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5056 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5057 
5058 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5059 	SPDK_CU_ASSERT_FATAL(req != NULL);
5060 
5061 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5062 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5063 	bio->retry_count = 4;
5064 
5065 	poll_thread_times(0, 1);
5066 
5067 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5068 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5069 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
5070 
5071 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
5072 	g_opts.bdev_retry_count = -1;
5073 
5074 	bdev_io->internal.f.in_submit_request = true;
5075 
5076 	bdev_nvme_submit_request(ch, bdev_io);
5077 
5078 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5079 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5080 
5081 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5082 	SPDK_CU_ASSERT_FATAL(req != NULL);
5083 
5084 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5085 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5086 	bio->retry_count = 4;
5087 
5088 	poll_thread_times(0, 1);
5089 
5090 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5091 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5092 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5093 
5094 	poll_threads();
5095 
5096 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5097 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5098 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5099 
5100 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
5101 	 * the failed I/O should be retried.
5102 	 */
5103 	g_opts.bdev_retry_count = 4;
5104 
5105 	bdev_io->internal.f.in_submit_request = true;
5106 
5107 	bdev_nvme_submit_request(ch, bdev_io);
5108 
5109 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5110 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5111 
5112 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5113 	SPDK_CU_ASSERT_FATAL(req != NULL);
5114 
5115 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5116 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5117 	bio->retry_count = 3;
5118 
5119 	poll_thread_times(0, 1);
5120 
5121 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5122 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5123 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5124 
5125 	poll_threads();
5126 
5127 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5128 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5129 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5130 
5131 	free(bdev_io);
5132 
5133 	spdk_put_io_channel(ch);
5134 
5135 	poll_threads();
5136 
5137 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5138 	CU_ASSERT(rc == 0);
5139 
5140 	poll_threads();
5141 	spdk_delay_us(1000);
5142 	poll_threads();
5143 
5144 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5145 
5146 	g_opts.bdev_retry_count = 0;
5147 }
5148 
5149 static void
5150 test_concurrent_read_ana_log_page(void)
5151 {
5152 	struct spdk_nvme_transport_id trid = {};
5153 	struct spdk_nvme_ctrlr *ctrlr;
5154 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5155 	struct nvme_ctrlr *nvme_ctrlr;
5156 	const int STRING_SIZE = 32;
5157 	const char *attached_names[STRING_SIZE];
5158 	int rc;
5159 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5160 
5161 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5162 	bdev_opts.multipath = false;
5163 
5164 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5165 	ut_init_trid(&trid);
5166 
5167 	set_thread(0);
5168 
5169 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
5170 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5171 
5172 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5173 
5174 	g_ut_attach_ctrlr_status = 0;
5175 	g_ut_attach_bdev_count = 1;
5176 
5177 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
5178 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5179 	CU_ASSERT(rc == 0);
5180 
5181 	spdk_delay_us(1000);
5182 	poll_threads();
5183 
5184 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5185 	poll_threads();
5186 
5187 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5188 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5189 
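	/* The first read request should be accepted and put one request on the
	 * admin queue.
	 */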
5190 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5191 
5192 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5193 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5194 
5195 	/* A following read request should be rejected. */
5196 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5197 
5198 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5199 
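	/* A read request from another thread should also be rejected. */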
5200 	set_thread(1);
5201 
5202 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5203 
5204 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5205 
5206 	/* A reset request issued while reading the ANA log page should not be rejected. */
5207 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5208 	CU_ASSERT(rc == 0);
5209 
5210 	poll_threads();
5211 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5212 	poll_threads();
5213 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5214 	poll_threads();
5215 
5216 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5217 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5218 
5219 	/* Reading the ANA log page while resetting the ctrlr should be rejected. */
5220 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5221 	CU_ASSERT(rc == 0);
5222 
5223 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5224 
5225 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5226 
5227 	poll_threads();
5228 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5229 	poll_threads();
5230 
5231 	set_thread(0);
5232 
5233 	/* It is possible that the target sent an ANA change for inactive namespaces.
5234 	 *
5235 	 * Previously, an assert() was added because this case was considered unlikely.
5236 	 * However, the assert() was hit in a real environment.
5237 	 *
5238 	 * Hence, remove the assert() and add a unit test case.
5239 	 *
5240 	 * Simulate this case by depopulating namespaces and then parsing an ANA
5241 	 * log page created when all namespaces were active.
5242 	 * Then, check that parsing the ANA log page completes successfully.
5243 	 */
5244 	nvme_ctrlr_depopulate_namespaces(nvme_ctrlr);
5245 
5246 	rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr);
5247 	CU_ASSERT(rc == 0);
5248 
5249 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5250 	CU_ASSERT(rc == 0);
5251 
5252 	poll_threads();
5253 	spdk_delay_us(1000);
5254 	poll_threads();
5255 
5256 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5257 }
5258 
5259 static void
5260 test_retry_io_for_ana_error(void)
5261 {
5262 	struct nvme_path_id path = {};
5263 	struct spdk_nvme_ctrlr *ctrlr;
5264 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5265 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5266 	struct nvme_ctrlr *nvme_ctrlr;
5267 	const int STRING_SIZE = 32;
5268 	const char *attached_names[STRING_SIZE];
5269 	struct nvme_bdev *bdev;
5270 	struct nvme_ns *nvme_ns;
5271 	struct spdk_bdev_io *bdev_io;
5272 	struct nvme_bdev_io *bio;
5273 	struct spdk_io_channel *ch;
5274 	struct nvme_bdev_channel *nbdev_ch;
5275 	struct nvme_io_path *io_path;
5276 	struct nvme_qpair *nvme_qpair;
5277 	struct ut_nvme_req *req;
5278 	uint64_t now;
5279 	int rc;
5280 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5281 
5282 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5283 	bdev_opts.multipath = false;
5284 
5285 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5286 	ut_init_trid(&path.trid);
5287 
5288 	g_opts.bdev_retry_count = 1;
5289 
5290 	set_thread(0);
5291 
5292 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
5293 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5294 
5295 	g_ut_attach_ctrlr_status = 0;
5296 	g_ut_attach_bdev_count = 1;
5297 
5298 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5299 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5300 	CU_ASSERT(rc == 0);
5301 
5302 	spdk_delay_us(1000);
5303 	poll_threads();
5304 
5305 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5306 	poll_threads();
5307 
5308 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5309 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5310 
5311 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
5312 	CU_ASSERT(nvme_ctrlr != NULL);
5313 
5314 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5315 	CU_ASSERT(bdev != NULL);
5316 
5317 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5318 	CU_ASSERT(nvme_ns != NULL);
5319 
5320 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5321 	ut_bdev_io_set_buf(bdev_io);
5322 
5323 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5324 
5325 	ch = spdk_get_io_channel(bdev);
5326 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5327 
5328 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5329 
5330 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5331 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5332 
5333 	nvme_qpair = io_path->qpair;
5334 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5335 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5336 
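	/* Capture the current tick count; bio->retry_ticks is checked against it
	 * below.
	 */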
5337 	now = spdk_get_ticks();
5338 
5339 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5340 
5341 	/* If an I/O gets an ANA error, it should be queued, the corresponding namespace
5342 	 * should be frozen, and its ANA state should be updated.
5343 	 */
5344 	bdev_io->internal.f.in_submit_request = true;
5345 
5346 	bdev_nvme_submit_request(ch, bdev_io);
5347 
5348 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5349 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5350 
5351 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5352 	SPDK_CU_ASSERT_FATAL(req != NULL);
5353 
5354 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5355 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
5356 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
5357 
5358 	poll_thread_times(0, 1);
5359 
5360 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5361 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5362 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5363 	/* I/O should be retried immediately. */
5364 	CU_ASSERT(bio->retry_ticks == now);
5365 	CU_ASSERT(nvme_ns->ana_state_updating == true);
5366 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5367 
5368 	poll_threads();
5369 
5370 	/* Namespace is inaccessible, and hence I/O should be queued again. */
5371 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5372 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5373 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5374 	/* I/O should be retried after a second if no I/O path was found,
5375 	 * since an I/O path may become available later.
5376 	 */
5377 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
5378 
5379 	/* The namespace should be unfrozen once its ANA state update completes. */
5380 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5381 	poll_threads();
5382 
5383 	CU_ASSERT(nvme_ns->ana_state_updating == false);
5384 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5385 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5386 
5387 	/* Retrying the queued I/O should succeed. */
5388 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
5389 	poll_threads();
5390 
5391 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5392 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5393 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5394 
5395 	free(bdev_io);
5396 
5397 	spdk_put_io_channel(ch);
5398 
5399 	poll_threads();
5400 
5401 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5402 	CU_ASSERT(rc == 0);
5403 
5404 	poll_threads();
5405 	spdk_delay_us(1000);
5406 	poll_threads();
5407 
5408 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5409 
5410 	g_opts.bdev_retry_count = 0;
5411 }
5412 
5413 static void
5414 test_check_io_error_resiliency_params(void)
5415 {
5416 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5417 	 * 3rd parameter is fast_io_fail_timeout_sec.
5418 	 */
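	/* Expected rules, inferred from the assertions below:
	 * - ctrlr_loss_timeout_sec must be 0, -1 (infinite retry), or positive.
	 * - If ctrlr_loss_timeout_sec is 0, the other two parameters must be 0 too.
	 * - Otherwise, reconnect_delay_sec must be non-zero and, when
	 *   ctrlr_loss_timeout_sec is positive, must not exceed it.
	 * - fast_io_fail_timeout_sec, if non-zero, must be at least
	 *   reconnect_delay_sec and, when ctrlr_loss_timeout_sec is positive,
	 *   at most ctrlr_loss_timeout_sec.
	 */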
5419 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
5420 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
5421 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
5422 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
5423 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
5424 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
5425 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
5426 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
5427 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
5428 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
5429 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
5430 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
5431 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
5432 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
5433 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
5434 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
5435 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
5436 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5437 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
5438 }
5439 
5440 static void
5441 test_retry_io_if_ctrlr_is_resetting(void)
5442 {
5443 	struct nvme_path_id path = {};
5444 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
5445 	struct spdk_nvme_ctrlr *ctrlr;
5446 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5447 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5448 	struct nvme_ctrlr *nvme_ctrlr;
5449 	const int STRING_SIZE = 32;
5450 	const char *attached_names[STRING_SIZE];
5451 	struct nvme_bdev *bdev;
5452 	struct nvme_ns *nvme_ns;
5453 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5454 	struct spdk_io_channel *ch;
5455 	struct nvme_bdev_channel *nbdev_ch;
5456 	struct nvme_io_path *io_path;
5457 	struct nvme_qpair *nvme_qpair;
5458 	int rc;
5459 
5460 	g_opts.bdev_retry_count = 1;
5461 
5462 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5463 	ut_init_trid(&path.trid);
5464 
5465 	set_thread(0);
5466 
5467 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5468 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5469 
5470 	g_ut_attach_ctrlr_status = 0;
5471 	g_ut_attach_bdev_count = 1;
5472 
5473 	opts.ctrlr_loss_timeout_sec = -1;
5474 	opts.reconnect_delay_sec = 1;
5475 	opts.multipath = false;
5476 
5477 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5478 				   attach_ctrlr_done, NULL, &dopts, &opts);
5479 	CU_ASSERT(rc == 0);
5480 
5481 	spdk_delay_us(1000);
5482 	poll_threads();
5483 
5484 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5485 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5486 
5487 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
5488 	CU_ASSERT(nvme_ctrlr != NULL);
5489 
5490 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5491 	CU_ASSERT(bdev != NULL);
5492 
5493 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5494 	CU_ASSERT(nvme_ns != NULL);
5495 
5496 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5497 	ut_bdev_io_set_buf(bdev_io1);
5498 
5499 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5500 	ut_bdev_io_set_buf(bdev_io2);
5501 
5502 	ch = spdk_get_io_channel(bdev);
5503 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5504 
5505 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5506 
5507 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5508 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5509 
5510 	nvme_qpair = io_path->qpair;
5511 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5512 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5513 
5514 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5515 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5516 
5517 	/* If qpair is connected, I/O should succeed. */
5518 	bdev_io1->internal.f.in_submit_request = true;
5519 
5520 	bdev_nvme_submit_request(ch, bdev_io1);
5521 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5522 
5523 	poll_threads();
5524 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
5525 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5526 
5527 	/* If qpair is disconnected, it is freed and then reconnected via resetting
5528 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
5529 	 * while resetting the nvme_ctrlr.
5530 	 */
5531 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5532 	ctrlr->is_failed = true;
5533 
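	/* Poll step by step so that the disconnected qpair is detected and freed,
	 * and a reset of the nvme_ctrlr is started.
	 */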
5534 	poll_thread_times(0, 5);
5535 
5536 	CU_ASSERT(nvme_qpair->qpair == NULL);
5537 	CU_ASSERT(nvme_ctrlr->resetting == true);
5538 	CU_ASSERT(ctrlr->is_failed == false);
5539 
5540 	bdev_io1->internal.f.in_submit_request = true;
5541 
5542 	bdev_nvme_submit_request(ch, bdev_io1);
5543 
5544 	spdk_delay_us(1);
5545 
5546 	bdev_io2->internal.f.in_submit_request = true;
5547 
5548 	bdev_nvme_submit_request(ch, bdev_io2);
5549 
5550 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5551 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5552 	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5553 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(
5554 			  TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx,
5555 				     retry_link)));
5556 
5557 	poll_threads();
5558 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5559 	poll_threads();
5560 
5561 	CU_ASSERT(nvme_qpair->qpair != NULL);
5562 	CU_ASSERT(nvme_ctrlr->resetting == false);
5563 
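	/* Advance time so that the retry delay of bdev_io1 expires while that of
	 * bdev_io2, which was submitted 1 us later, does not.
	 */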
5564 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5565 
5566 	poll_thread_times(0, 1);
5567 
5568 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5569 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5570 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5571 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5572 
5573 	poll_threads();
5574 
5575 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5576 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
5577 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5578 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5579 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5580 
5581 	spdk_delay_us(1);
5582 
5583 	poll_thread_times(0, 1);
5584 
5585 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5586 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5587 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5588 
5589 	poll_threads();
5590 
5591 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5592 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == false);
5593 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5594 
5595 	free(bdev_io1);
5596 	free(bdev_io2);
5597 
5598 	spdk_put_io_channel(ch);
5599 
5600 	poll_threads();
5601 
5602 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5603 	CU_ASSERT(rc == 0);
5604 
5605 	poll_threads();
5606 	spdk_delay_us(1000);
5607 	poll_threads();
5608 
5609 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5610 
5611 	g_opts.bdev_retry_count = 0;
5612 }
5613 
5614 static void
5615 test_reconnect_ctrlr(void)
5616 {
5617 	struct spdk_nvme_transport_id trid = {};
5618 	struct spdk_nvme_ctrlr ctrlr = {};
5619 	struct nvme_ctrlr *nvme_ctrlr;
5620 	struct spdk_io_channel *ch1, *ch2;
5621 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5622 	int rc;
5623 
5624 	ut_init_trid(&trid);
5625 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5626 
5627 	set_thread(0);
5628 
5629 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5630 	CU_ASSERT(rc == 0);
5631 
5632 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5633 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5634 
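	/* With ctrlr_loss_timeout_sec == 2 and reconnect_delay_sec == 1, reconnect
	 * is retried every second and the ctrlr is deleted once reconnection keeps
	 * failing past two seconds.
	 */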
5635 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5636 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5637 
5638 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5639 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5640 
5641 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5642 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5643 
5644 	set_thread(1);
5645 
5646 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5647 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5648 
5649 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5650 
5651 	/* Reset starts from thread 1. */
5652 	set_thread(1);
5653 
5654 	/* The reset should fail and a reconnect timer should be registered. */
5655 	ctrlr.fail_reset = true;
5656 	ctrlr.is_failed = true;
5657 
5658 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5659 	CU_ASSERT(rc == 0);
5660 	CU_ASSERT(nvme_ctrlr->resetting == true);
5661 	CU_ASSERT(ctrlr.is_failed == true);
5662 
5663 	poll_threads();
5664 
5665 	CU_ASSERT(nvme_ctrlr->resetting == false);
5666 	CU_ASSERT(ctrlr.is_failed == false);
5667 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5668 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5669 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5670 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5671 
5672 	/* A new reset starts from thread 1. */
5673 	set_thread(1);
5674 
5675 	/* The reset should cancel the reconnect timer and start from the reconnection step.
5676 	 * Then, the reset should fail and a reconnect timer should be registered again.
5677 	 */
5678 	ctrlr.fail_reset = true;
5679 	ctrlr.is_failed = true;
5680 
5681 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5682 	CU_ASSERT(rc == 0);
5683 	CU_ASSERT(nvme_ctrlr->resetting == true);
5684 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5685 	CU_ASSERT(ctrlr.is_failed == true);
5686 
5687 	poll_threads();
5688 
5689 	CU_ASSERT(nvme_ctrlr->resetting == false);
5690 	CU_ASSERT(ctrlr.is_failed == false);
5691 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5692 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5693 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5694 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5695 
5696 	/* Then a reconnect retry should succeed. */
5697 	ctrlr.fail_reset = false;
5698 
5699 	spdk_delay_us(SPDK_SEC_TO_USEC);
5700 	poll_thread_times(0, 1);
5701 
5702 	CU_ASSERT(nvme_ctrlr->resetting == true);
5703 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5704 
5705 	poll_threads();
5706 
5707 	CU_ASSERT(nvme_ctrlr->resetting == false);
5708 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5709 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5710 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5711 
5712 	/* The reset should fail and a reconnect timer should be registered. */
5713 	ctrlr.fail_reset = true;
5714 	ctrlr.is_failed = true;
5715 
5716 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5717 	CU_ASSERT(rc == 0);
5718 	CU_ASSERT(nvme_ctrlr->resetting == true);
5719 	CU_ASSERT(ctrlr.is_failed == true);
5720 
5721 	poll_threads();
5722 
5723 	CU_ASSERT(nvme_ctrlr->resetting == false);
5724 	CU_ASSERT(ctrlr.is_failed == false);
5725 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5726 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5727 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5728 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5729 
5730 	/* Then a reconnect retry should still fail. */
5731 	spdk_delay_us(SPDK_SEC_TO_USEC);
5732 	poll_thread_times(0, 1);
5733 
5734 	CU_ASSERT(nvme_ctrlr->resetting == true);
5735 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5736 
5737 	poll_threads();
5738 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5739 	poll_threads();
5740 
5741 	CU_ASSERT(nvme_ctrlr->resetting == false);
5742 	CU_ASSERT(ctrlr.is_failed == false);
5743 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5744 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5745 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5746 
5747 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5748 	spdk_delay_us(SPDK_SEC_TO_USEC);
5749 	poll_threads();
5750 
5751 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5752 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5753 	CU_ASSERT(nvme_ctrlr->destruct == true);
5754 
5755 	spdk_put_io_channel(ch2);
5756 
5757 	set_thread(0);
5758 
5759 	spdk_put_io_channel(ch1);
5760 
5761 	poll_threads();
5762 	spdk_delay_us(1000);
5763 	poll_threads();
5764 
5765 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5766 }
5767 
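/* Return the nvme_path_id of nvme_ctrlr whose trid matches the given trid,
 * or NULL if no such path exists.
 */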
5768 static struct nvme_path_id *
5769 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5770 		       const struct spdk_nvme_transport_id *trid)
5771 {
5772 	struct nvme_path_id *p;
5773 
5774 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5775 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5776 			break;
5777 		}
5778 	}
5779 
5780 	return p;
5781 }
5782 
5783 static void
5784 test_retry_failover_ctrlr(void)
5785 {
5786 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5787 	struct spdk_nvme_ctrlr ctrlr = {};
5788 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5789 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5790 	struct spdk_io_channel *ch;
5791 	struct nvme_ctrlr_channel *ctrlr_ch;
5792 	int rc;
5793 
5794 	ut_init_trid(&trid1);
5795 	ut_init_trid2(&trid2);
5796 	ut_init_trid3(&trid3);
5797 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5798 
5799 	set_thread(0);
5800 
5801 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5802 	CU_ASSERT(rc == 0);
5803 
5804 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5805 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5806 
5807 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5808 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5809 
5810 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5811 	CU_ASSERT(rc == 0);
5812 
5813 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5814 	CU_ASSERT(rc == 0);
5815 
5816 	ch = spdk_get_io_channel(nvme_ctrlr);
5817 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5818 
5819 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5820 
5821 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5822 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5823 	CU_ASSERT(path_id1->last_failed_tsc == 0);
5824 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5825 
5826 	/* Look up the secondary path_ids to check failover behavior below. */
5827 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5828 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5829 
5830 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5831 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5832 
5833 	/* It is expected that connecting to all of trid1, trid2, and trid3 fails,
5834 	 * and a reconnect timer is started. */
5835 	ctrlr.fail_reset = true;
5836 	ctrlr.is_failed = true;
5837 
5838 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5839 	CU_ASSERT(rc == 0);
5840 
5841 	poll_threads();
5842 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5843 	poll_threads();
5844 
5845 	CU_ASSERT(nvme_ctrlr->resetting == false);
5846 	CU_ASSERT(ctrlr.is_failed == false);
5847 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5848 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5849 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5850 	CU_ASSERT(path_id1->last_failed_tsc != 0);
5851 
5852 	CU_ASSERT(path_id2->last_failed_tsc != 0);
5853 	CU_ASSERT(path_id3->last_failed_tsc != 0);
5854 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5855 
5856 	/* If we remove trid1 while a reconnect is scheduled, trid1 is removed, the
5857 	 * path_id is switched to trid2, but a reset is not started.
5858 	 */
5859 	rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true);
5860 	CU_ASSERT(rc == -EALREADY);
5861 
5862 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL);
5863 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5864 
5865 	CU_ASSERT(nvme_ctrlr->resetting == false);
5866 
5867 	/* If reconnect succeeds, trid2 should be the active path_id. */
5868 	ctrlr.fail_reset = false;
5869 
5870 	spdk_delay_us(SPDK_SEC_TO_USEC);
5871 	poll_thread_times(0, 1);
5872 
5873 	CU_ASSERT(nvme_ctrlr->resetting == true);
5874 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5875 
5876 	poll_threads();
5877 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5878 	poll_threads();
5879 
5880 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL);
5881 	CU_ASSERT(path_id2->last_failed_tsc == 0);
5882 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5883 	CU_ASSERT(nvme_ctrlr->resetting == false);
5884 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5885 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5886 
5887 	spdk_put_io_channel(ch);
5888 
5889 	poll_threads();
5890 
5891 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5892 	CU_ASSERT(rc == 0);
5893 
5894 	poll_threads();
5895 	spdk_delay_us(1000);
5896 	poll_threads();
5897 
5898 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5899 }
5900 
5901 static void
5902 test_fail_path(void)
5903 {
5904 	struct nvme_path_id path = {};
5905 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
5906 	struct spdk_nvme_ctrlr *ctrlr;
5907 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5908 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5909 	struct nvme_ctrlr *nvme_ctrlr;
5910 	const int STRING_SIZE = 32;
5911 	const char *attached_names[STRING_SIZE];
5912 	struct nvme_bdev *bdev;
5913 	struct nvme_ns *nvme_ns;
5914 	struct spdk_bdev_io *bdev_io;
5915 	struct spdk_io_channel *ch;
5916 	struct nvme_bdev_channel *nbdev_ch;
5917 	struct nvme_io_path *io_path;
5918 	struct nvme_ctrlr_channel *ctrlr_ch;
5919 	int rc;
5920 
5921 	/* The test scenario is the following.
5922 	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5923 	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
5924 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5925 	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
5926 	 *   expires first. The queued I/O is failed.
5927 	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
5928 	 * - Then ctrlr_loss_timeout_sec expires and the ctrlr is deleted.
5929 	 */
5930 
5931 	g_opts.bdev_retry_count = 1;
5932 
5933 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5934 	ut_init_trid(&path.trid);
5935 
5936 	set_thread(0);
5937 
5938 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5939 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5940 
5941 	g_ut_attach_ctrlr_status = 0;
5942 	g_ut_attach_bdev_count = 1;
5943 
5944 	opts.ctrlr_loss_timeout_sec = 4;
5945 	opts.reconnect_delay_sec = 1;
5946 	opts.fast_io_fail_timeout_sec = 2;
5947 	opts.multipath = false;
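	/* Timeline: reconnect is retried every second, fast I/O failure starts at
	 * 2 seconds, and the ctrlr is deleted at 4 seconds.
	 */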
5948 
5949 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5950 				   attach_ctrlr_done, NULL, &dopts, &opts);
5951 	CU_ASSERT(rc == 0);
5952 
5953 	spdk_delay_us(1000);
5954 	poll_threads();
5955 
5956 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5957 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5958 
5959 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
5960 	CU_ASSERT(nvme_ctrlr != NULL);
5961 
5962 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5963 	CU_ASSERT(bdev != NULL);
5964 
5965 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5966 	CU_ASSERT(nvme_ns != NULL);
5967 
5968 	ch = spdk_get_io_channel(bdev);
5969 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5970 
5971 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5972 
5973 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5974 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5975 
5976 	ctrlr_ch = io_path->qpair->ctrlr_ch;
5977 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5978 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
5979 
5980 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
5981 	ut_bdev_io_set_buf(bdev_io);
5982 
5984 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
5985 	ctrlr->fail_reset = true;
5986 	ctrlr->is_failed = true;
5987 
5988 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5989 	CU_ASSERT(rc == 0);
5990 	CU_ASSERT(nvme_ctrlr->resetting == true);
5991 	CU_ASSERT(ctrlr->is_failed == true);
5992 
5993 	poll_threads();
5994 
5995 	CU_ASSERT(nvme_ctrlr->resetting == false);
5996 	CU_ASSERT(ctrlr->is_failed == false);
5997 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5998 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5999 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
6000 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
6001 
6002 	/* I/O should be queued. */
6003 	bdev_io->internal.f.in_submit_request = true;
6004 
6005 	bdev_nvme_submit_request(ch, bdev_io);
6006 
6007 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6008 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6009 
6010 	/* After a second, the I/O should still be queued and the ctrlr should
6011 	 * still be recovering.
6012 	 */
6013 	spdk_delay_us(SPDK_SEC_TO_USEC);
6014 	poll_threads();
6015 
6016 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6017 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6018 
6019 	CU_ASSERT(nvme_ctrlr->resetting == false);
6020 	CU_ASSERT(ctrlr->is_failed == false);
6021 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6022 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6023 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
6024 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
6025 
6026 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6027 
6028 	/* After two seconds, fast_io_fail_timeout_sec should expire. */
6029 	spdk_delay_us(SPDK_SEC_TO_USEC);
6030 	poll_threads();
6031 
6032 	CU_ASSERT(nvme_ctrlr->resetting == false);
6033 	CU_ASSERT(ctrlr->is_failed == false);
6034 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6035 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6036 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
6037 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
6038 
6039 	/* Then, within a second, the pending I/O should be failed. */
6040 	spdk_delay_us(SPDK_SEC_TO_USEC);
6041 	poll_threads();
6042 
6043 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6044 	poll_threads();
6045 
6046 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6047 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
6048 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
6049 
6050 	/* Another I/O submission should fail immediately. */
6051 	bdev_io->internal.f.in_submit_request = true;
6052 
6053 	bdev_nvme_submit_request(ch, bdev_io);
6054 
6055 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6056 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
6057 
6058 	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr
6059 	 * should be deleted.
6060 	 */
6061 	spdk_delay_us(SPDK_SEC_TO_USEC);
6062 	poll_threads();
6063 
6064 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6065 	poll_threads();
6066 
6067 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
6068 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
6069 	CU_ASSERT(nvme_ctrlr->destruct == true);
6070 
6071 	spdk_put_io_channel(ch);
6072 
6073 	poll_threads();
6074 	spdk_delay_us(1000);
6075 	poll_threads();
6076 
6077 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6078 
6079 	free(bdev_io);
6080 
6081 	g_opts.bdev_retry_count = 0;
6082 }
6083 
6084 static void
6085 test_nvme_ns_cmp(void)
6086 {
6087 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
6088 
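	/* nvme_ns_cmp() orders namespaces by id; check the extremes of the 32-bit
	 * id range.
	 */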
6089 	nvme_ns1.id = 0;
6090 	nvme_ns2.id = UINT32_MAX;
6091 
6092 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
6093 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
6094 }
6095 
6096 static void
6097 test_ana_transition(void)
6098 {
6099 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
6100 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
6101 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
6102 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
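	/* anatt == 10 means an ANA transition is expected to complete within 10
	 * seconds; otherwise the ANATT timer fires and marks the transition as
	 * timed out.
	 */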
6103 
6104 	/* case 1: ANA transition timeout is canceled. */
6105 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6106 	nvme_ns.ana_transition_timedout = true;
6107 
6108 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6109 
6110 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6111 
6112 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
6113 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6114 
6115 	/* case 2: ANATT timer is kept. */
6116 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6117 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
6118 			      &nvme_ns,
6119 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6120 
6121 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6122 
6123 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6124 
6125 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6126 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
6127 
6128 	/* case 3: ANATT timer is stopped. */
6129 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6130 
6131 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6132 
6133 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6134 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6135 
6136 	/* case 4: ANATT timer is started. */
6137 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6138 
6139 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6140 
6141 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6142 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
6143 
6144 	/* case 5: ANATT timer expires. */
6145 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6146 
6147 	poll_threads();
6148 
6149 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6150 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
6151 }
6152 
6153 static void
6154 _set_preferred_path_cb(void *cb_arg, int rc)
6155 {
6156 	bool *done = cb_arg;
6157 
6158 	*done = true;
6159 }
6160 
6161 static void
6162 test_set_preferred_path(void)
6163 {
6164 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
6165 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
6166 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6167 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6168 	const int STRING_SIZE = 32;
6169 	const char *attached_names[STRING_SIZE];
6170 	struct nvme_bdev *bdev;
6171 	struct spdk_io_channel *ch;
6172 	struct nvme_bdev_channel *nbdev_ch;
6173 	struct nvme_io_path *io_path;
6174 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6175 	const struct spdk_nvme_ctrlr_data *cdata;
6176 	bool done;
6177 	int rc;
6178 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
6179 
6180 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
6181 	bdev_opts.multipath = true;
6182 
6183 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6184 	ut_init_trid(&path1.trid);
6185 	ut_init_trid2(&path2.trid);
6186 	ut_init_trid3(&path3.trid);
6187 	g_ut_attach_ctrlr_status = 0;
6188 	g_ut_attach_bdev_count = 1;
6189 
6190 	set_thread(0);
6191 
6192 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6193 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6194 
6195 	ctrlr1->ns[0].uuid = &uuid1;
6196 
6197 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6198 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6199 	CU_ASSERT(rc == 0);
6200 
6201 	spdk_delay_us(1000);
6202 	poll_threads();
6203 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6204 	poll_threads();
6205 
6206 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6207 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6208 
6209 	ctrlr2->ns[0].uuid = &uuid1;
6210 
6211 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6212 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6213 	CU_ASSERT(rc == 0);
6214 
6215 	spdk_delay_us(1000);
6216 	poll_threads();
6217 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6218 	poll_threads();
6219 
6220 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
6221 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
6222 
6223 	ctrlr3->ns[0].uuid = &uuid1;
6224 
6225 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
6226 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6227 	CU_ASSERT(rc == 0);
6228 
6229 	spdk_delay_us(1000);
6230 	poll_threads();
6231 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6232 	poll_threads();
6233 
6234 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6235 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6236 
6237 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6238 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6239 
6240 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6241 
6242 	ch = spdk_get_io_channel(bdev);
6243 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6244 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6245 
6246 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6247 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6248 
6249 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6250 
6251 	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
6252 	 * should return io_path to ctrlr2.
6253 	 */
6254 
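	/* bdev_nvme_set_preferred_path() identifies the preferred ctrlr by its
	 * cntlid.
	 */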
6255 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
6256 	done = false;
6257 
6258 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6259 
6260 	poll_threads();
6261 	CU_ASSERT(done == true);
6262 
6263 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6264 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6265 
6266 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6267 
6268 	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
6269 	 * acquired, find_io_path() should return io_path to ctrlr3.
6270 	 */
6271 
6272 	spdk_put_io_channel(ch);
6273 
6274 	poll_threads();
6275 
6276 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
6277 	done = false;
6278 
6279 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6280 
6281 	poll_threads();
6282 	CU_ASSERT(done == true);
6283 
6284 	ch = spdk_get_io_channel(bdev);
6285 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6286 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6287 
6288 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6289 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6290 
6291 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
6292 
6293 	spdk_put_io_channel(ch);
6294 
6295 	poll_threads();
6296 
6297 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6298 	CU_ASSERT(rc == 0);
6299 
6300 	poll_threads();
6301 	spdk_delay_us(1000);
6302 	poll_threads();
6303 
6304 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6305 }
6306 
6307 static void
6308 test_find_next_io_path(void)
6309 {
6310 	struct nvme_bdev_channel nbdev_ch = {
6311 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6312 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6313 		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
6314 	};
6315 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6316 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6317 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6318 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6319 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6320 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6321 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6322 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6323 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6324 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6325 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6326 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6327 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6328 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6329 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6330 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6331 
6332 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6333 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6334 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6335 
6336 	/* Test the case when nbdev_ch->current_io_path is set. The case of
6337 	 * current_io_path == NULL is covered in test_find_io_path.
6338 	 */
6339 
6340 	nbdev_ch.current_io_path = &io_path2;
6341 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6342 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6343 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6344 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6345 
6346 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6347 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6348 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6349 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6350 
6351 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6352 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6353 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6354 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6355 
6356 	nbdev_ch.current_io_path = &io_path3;
6357 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6358 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6359 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6360 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6361 
6362 	/* Test if the next io_path is selected according to rr_min_io. */
6363 
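	/* With rr_min_io == 2, each optimized path should serve two consecutive
	 * I/Os before the round-robin selector advances to the next path.
	 */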
6364 	nbdev_ch.current_io_path = NULL;
6365 	nbdev_ch.rr_min_io = 2;
6366 	nbdev_ch.rr_counter = 0;
6367 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6368 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6369 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6370 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6371 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6372 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6373 
6374 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6375 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6376 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6377 }
6378 
6379 static void
6380 test_find_io_path_min_qd(void)
6381 {
6382 	struct nvme_bdev_channel nbdev_ch = {
6383 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6384 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6385 		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
6386 	};
6387 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6388 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6389 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6390 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6391 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6392 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6393 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6394 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6395 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6396 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6397 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6398 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6399 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6400 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6401 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6402 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6403 
6404 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6405 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6406 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6407 
6408 	/* Test that the ANA optimized state is prioritized over a smaller queue depth
6409 	 * and that, within the same state, the least queue depth selector is used.
6410 	 */
6411 	qpair1.num_outstanding_reqs = 2;
6412 	qpair2.num_outstanding_reqs = 1;
6413 	qpair3.num_outstanding_reqs = 0;
6414 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6415 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6416 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6417 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6418 
6419 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6420 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6421 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6422 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6423 
6424 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6425 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6426 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6427 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6428 
6429 	qpair2.num_outstanding_reqs = 4;
6430 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6431 }
6432 
6433 static void
6434 test_disable_auto_failback(void)
6435 {
6436 	struct nvme_path_id path1 = {}, path2 = {};
6437 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
6438 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6439 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6440 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6441 	struct nvme_ctrlr *nvme_ctrlr1;
6442 	const int STRING_SIZE = 32;
6443 	const char *attached_names[STRING_SIZE];
6444 	struct nvme_bdev *bdev;
6445 	struct spdk_io_channel *ch;
6446 	struct nvme_bdev_channel *nbdev_ch;
6447 	struct nvme_io_path *io_path;
6448 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6449 	const struct spdk_nvme_ctrlr_data *cdata;
6450 	bool done;
6451 	int rc;
6452 
6453 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6454 	ut_init_trid(&path1.trid);
6455 	ut_init_trid2(&path2.trid);
6456 	g_ut_attach_ctrlr_status = 0;
6457 	g_ut_attach_bdev_count = 1;
6458 
6459 	g_opts.disable_auto_failback = true;
6460 
6461 	opts.ctrlr_loss_timeout_sec = -1;
6462 	opts.reconnect_delay_sec = 1;
6463 	opts.multipath = true;
6464 
6465 	set_thread(0);
6466 
6467 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6468 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6469 
6470 	ctrlr1->ns[0].uuid = &uuid1;
6471 
6472 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6473 				   attach_ctrlr_done, NULL, &dopts, &opts);
6474 	CU_ASSERT(rc == 0);
6475 
6476 	spdk_delay_us(1000);
6477 	poll_threads();
6478 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6479 	poll_threads();
6480 
6481 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6482 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6483 
6484 	ctrlr2->ns[0].uuid = &uuid1;
6485 
6486 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6487 				   attach_ctrlr_done, NULL, &dopts, &opts);
6488 	CU_ASSERT(rc == 0);
6489 
6490 	spdk_delay_us(1000);
6491 	poll_threads();
6492 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6493 	poll_threads();
6494 
6495 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6496 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6497 
6498 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6499 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6500 
6501 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn);
6502 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6503 
6504 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6505 
6506 	ch = spdk_get_io_channel(bdev);
6507 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6508 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6509 
6510 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6511 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6512 
6513 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6514 
6515 	/* If resetting ctrlr1 fails, the io_path to ctrlr2 should be used. */
6516 	ctrlr1->fail_reset = true;
6517 	ctrlr1->is_failed = true;
6518 
6519 	bdev_nvme_reset_ctrlr(nvme_ctrlr1);
6520 
6521 	poll_threads();
6522 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6523 	poll_threads();
6524 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6525 	poll_threads();
6526 
6527 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6528 
6529 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6530 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6531 
6532 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6533 
6534 	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
6535 	 * Hence, io_path to ctrlr2 should still be used.
6536 	 */
6537 	ctrlr1->fail_reset = false;
6538 
6539 	spdk_delay_us(SPDK_SEC_TO_USEC);
6540 	poll_threads();
6541 
6542 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6543 
6544 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6545 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6546 
6547 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6548 
6549 	/* Explicitly set the io_path to ctrlr1 as preferred. Then the io_path to
6550 	 * ctrlr1 should be used again.
6551 	 */
6552 
6553 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6554 	done = false;
6555 
6556 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6557 
6558 	poll_threads();
6559 	CU_ASSERT(done == true);
6560 
6561 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6562 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6563 
6564 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6565 
6566 	spdk_put_io_channel(ch);
6567 
6568 	poll_threads();
6569 
6570 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6571 	CU_ASSERT(rc == 0);
6572 
6573 	poll_threads();
6574 	spdk_delay_us(1000);
6575 	poll_threads();
6576 
6577 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6578 
6579 	g_opts.disable_auto_failback = false;
6580 }
6581 
6582 static void
6583 ut_set_multipath_policy_done(void *cb_arg, int rc)
6584 {
6585 	int *done = cb_arg;
6586 
6587 	SPDK_CU_ASSERT_FATAL(done != NULL);
6588 	*done = rc;
6589 }
6590 
6591 static void
6592 test_set_multipath_policy(void)
6593 {
6594 	struct nvme_path_id path1 = {}, path2 = {};
6595 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
6596 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6597 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6598 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6599 	const int STRING_SIZE = 32;
6600 	const char *attached_names[STRING_SIZE];
6601 	struct nvme_bdev *bdev;
6602 	struct spdk_io_channel *ch;
6603 	struct nvme_bdev_channel *nbdev_ch;
6604 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6605 	int done;
6606 	int rc;
6607 
6608 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6609 	ut_init_trid(&path1.trid);
6610 	ut_init_trid2(&path2.trid);
6611 	g_ut_attach_ctrlr_status = 0;
6612 	g_ut_attach_bdev_count = 1;
6613 
6614 	g_opts.disable_auto_failback = true;
6615 
6616 	opts.ctrlr_loss_timeout_sec = -1;
6617 	opts.reconnect_delay_sec = 1;
6618 	opts.multipath = true;
6619 
6620 	set_thread(0);
6621 
6622 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6623 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6624 
6625 	ctrlr1->ns[0].uuid = &uuid1;
6626 
6627 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6628 				   attach_ctrlr_done, NULL, &dopts, &opts);
6629 	CU_ASSERT(rc == 0);
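
	/* Advance time so that the async attach poller and then the admin queue
	 * poller run, completing controller attach and namespace enumeration.
	 */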
6630 
6631 	spdk_delay_us(1000);
6632 	poll_threads();
6633 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6634 	poll_threads();
6635 
6636 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6637 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6638 
6639 	ctrlr2->ns[0].uuid = &uuid1;
6640 
6641 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6642 				   attach_ctrlr_done, NULL, &dopts, &opts);
6643 	CU_ASSERT(rc == 0);
6644 
6645 	spdk_delay_us(1000);
6646 	poll_threads();
6647 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6648 	poll_threads();
6649 
6650 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6651 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6652 
6653 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6654 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6655 
	/* If the multipath policy is updated before getting any I/O channel,
	 * a new I/O channel should have the update.
	 */
6659 	done = -1;
6660 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6661 					    BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX,
6662 					    ut_set_multipath_policy_done, &done);
6663 	poll_threads();
6664 	CU_ASSERT(done == 0);
6665 
6666 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6667 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6668 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6669 
6670 	ch = spdk_get_io_channel(bdev);
6671 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6672 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6673 
6674 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6675 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6676 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6677 
	/* If the multipath policy is updated while an I/O channel is active,
	 * the update should be applied to the I/O channel immediately.
	 */
6681 	done = -1;
6682 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6683 					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX,
6684 					    ut_set_multipath_policy_done, &done);
6685 	poll_threads();
6686 	CU_ASSERT(done == 0);
6687 
6688 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6689 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6690 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6691 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6692 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6693 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6694 
6695 	spdk_put_io_channel(ch);
6696 
6697 	poll_threads();
6698 
6699 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6700 	CU_ASSERT(rc == 0);
6701 
6702 	poll_threads();
6703 	spdk_delay_us(1000);
6704 	poll_threads();
6705 
6706 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6707 }
6708 
6709 static void
6710 test_uuid_generation(void)
6711 {
6712 	uint32_t nsid1 = 1, nsid2 = 2;
6713 	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
6714 	char sn3[21] = "                    ";
6715 	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
6716 	struct spdk_uuid uuid1, uuid2;
6717 	int rc;
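
	/* nvme_generate_uuid() derives a namespace UUID from the controller serial
	 * number and the nsid. The cases below check that changing either input
	 * changes the generated UUID.
	 */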
6718 
	/* Test case 1:
	 * Serial numbers are the same, nsids are different.
	 * Compare the two generated UUIDs - they should be different. */
6722 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6723 	CU_ASSERT(rc == 0);
6724 	rc = nvme_generate_uuid(sn1, nsid2, &uuid2);
6725 	CU_ASSERT(rc == 0);
6726 
6727 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6728 
	/* Test case 2:
	 * Serial numbers differ only by one character, nsids are the same.
	 * Compare the two generated UUIDs - they should be different. */
6732 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6733 	CU_ASSERT(rc == 0);
6734 	rc = nvme_generate_uuid(sn2, nsid1, &uuid2);
6735 	CU_ASSERT(rc == 0);
6736 
6737 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6738 
	/* Test case 3:
	 * Serial number consists only of space characters.
	 * Validate the generated UUID. */
6742 	rc = nvme_generate_uuid(sn3, nsid1, &uuid1);
6743 	CU_ASSERT(rc == 0);
6744 	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
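
	/* Test case 4:
	 * Generation is assumed to be deterministic, i.e. a pure function of
	 * (sn, nsid). Repeating a call with the same inputs should yield the
	 * same UUID. */
	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
	CU_ASSERT(rc == 0);
	rc = nvme_generate_uuid(sn1, nsid1, &uuid2);
	CU_ASSERT(rc == 0);

	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) == 0);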
}
6747 
6748 static void
6749 test_retry_io_to_same_path(void)
6750 {
6751 	struct nvme_path_id path1 = {}, path2 = {};
6752 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6753 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6754 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6755 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
6756 	const int STRING_SIZE = 32;
6757 	const char *attached_names[STRING_SIZE];
6758 	struct nvme_bdev *bdev;
6759 	struct spdk_bdev_io *bdev_io;
6760 	struct nvme_bdev_io *bio;
6761 	struct spdk_io_channel *ch;
6762 	struct nvme_bdev_channel *nbdev_ch;
6763 	struct nvme_io_path *io_path1, *io_path2;
6764 	struct ut_nvme_req *req;
6765 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6766 	int done;
6767 	int rc;
6768 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
6769 
6770 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
6771 	bdev_opts.multipath = true;
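
	/* Use a 1 us I/O queue poll period so that a single spdk_delay_us(1) is
	 * enough to make the queue pollers complete outstanding I/O.
	 */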
6772 
6773 	g_opts.nvme_ioq_poll_period_us = 1;
6774 
6775 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6776 	ut_init_trid(&path1.trid);
6777 	ut_init_trid2(&path2.trid);
6778 	g_ut_attach_ctrlr_status = 0;
6779 	g_ut_attach_bdev_count = 1;
6780 
6781 	set_thread(0);
6782 
6783 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6784 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6785 
6786 	ctrlr1->ns[0].uuid = &uuid1;
6787 
6788 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6789 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6790 	CU_ASSERT(rc == 0);
6791 
6792 	spdk_delay_us(1000);
6793 	poll_threads();
6794 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6795 	poll_threads();
6796 
6797 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6798 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6799 
6800 	ctrlr2->ns[0].uuid = &uuid1;
6801 
6802 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6803 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6804 	CU_ASSERT(rc == 0);
6805 
6806 	spdk_delay_us(1000);
6807 	poll_threads();
6808 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6809 	poll_threads();
6810 
6811 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6812 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6813 
6814 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
6815 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6816 
6817 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
6818 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6819 
6820 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6821 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6822 
6823 	done = -1;
6824 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6825 					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
6826 	poll_threads();
6827 	CU_ASSERT(done == 0);
6828 
6829 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6830 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6831 	CU_ASSERT(bdev->rr_min_io == 1);
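
	/* With rr_min_io == 1, the round-robin selector switches to the next
	 * path after every I/O, which the submissions below rely on.
	 */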
6832 
6833 	ch = spdk_get_io_channel(bdev);
6834 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6835 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6836 
6837 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6839 	CU_ASSERT(nbdev_ch->rr_min_io == 1);
6840 
6841 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6842 	ut_bdev_io_set_buf(bdev_io);
6843 
6844 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
6845 
6846 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
6847 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
6848 
6849 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
6850 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
6851 
6852 	/* The 1st I/O should be submitted to io_path1. */
6853 	bdev_io->internal.f.in_submit_request = true;
6854 
6855 	bdev_nvme_submit_request(ch, bdev_io);
6856 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6857 	CU_ASSERT(bio->io_path == io_path1);
6858 	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);
6859 
6860 	spdk_delay_us(1);
6861 
6862 	poll_threads();
6863 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6865 
6866 	/* The 2nd I/O should be submitted to io_path2 because the path selection
6867 	 * policy is round-robin.
6868 	 */
6869 	bdev_io->internal.f.in_submit_request = true;
6870 
6871 	bdev_nvme_submit_request(ch, bdev_io);
6872 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6873 	CU_ASSERT(bio->io_path == io_path2);
6874 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6875 
6876 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6877 	SPDK_CU_ASSERT_FATAL(req != NULL);
6878 
6879 	/* Set retry count to non-zero. */
6880 	g_opts.bdev_retry_count = 2;
6881 
6882 	/* Inject an I/O error. */
6883 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6884 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6885 
6886 	/* The 2nd I/O should be queued to nbdev_ch. */
6887 	spdk_delay_us(1);
6888 	poll_thread_times(0, 1);
6889 
6890 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6891 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6892 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6893 
	/* The 2nd I/O should still have io_path2 cached. */
6895 	CU_ASSERT(bio->io_path == io_path2);
6896 
6897 	/* The 2nd I/O should be submitted to io_path2 again. */
6898 	poll_thread_times(0, 1);
6899 
6900 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6901 	CU_ASSERT(bio->io_path == io_path2);
6902 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6903 
6904 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6905 	SPDK_CU_ASSERT_FATAL(req != NULL);
6906 
6907 	/* Inject an I/O error again. */
6908 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6909 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6910 	req->cpl.status.crd = 1;
6911 
6912 	ctrlr2->cdata.crdt[1] = 1;
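
	/* cpl.status.crd == 1 selects Command Retry Delay Time 1 (cdata.crdt[1]).
	 * CRDT values are in units of 100 milliseconds, so the retry of this
	 * command is expected to be delayed by 100 ms.
	 */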
6913 
6914 	/* The 2nd I/O should be queued to nbdev_ch. */
6915 	spdk_delay_us(1);
6916 	poll_thread_times(0, 1);
6917 
6918 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6919 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6920 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6921 
	/* The 2nd I/O should still have io_path2 cached. */
6923 	CU_ASSERT(bio->io_path == io_path2);
6924 
6925 	/* Detach ctrlr2 dynamically. */
6926 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
6927 	CU_ASSERT(rc == 0);
6928 
6929 	spdk_delay_us(1000);
6930 	poll_threads();
6931 	spdk_delay_us(1000);
6932 	poll_threads();
6933 	spdk_delay_us(1000);
6934 	poll_threads();
6935 	spdk_delay_us(1000);
6936 	poll_threads();
6937 
6938 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
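
	/* Advance time past the 100 ms CRDT-based retry delay. The queued I/O is
	 * retried, and because ctrlr2 is gone it is resubmitted to io_path1.
	 */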
6939 
6940 	poll_threads();
6941 	spdk_delay_us(100000);
6942 	poll_threads();
6943 	spdk_delay_us(1);
6944 	poll_threads();
6945 
	/* The 2nd I/O should succeed via io_path1. */
6947 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6949 	CU_ASSERT(bio->io_path == io_path1);
6950 
6951 	free(bdev_io);
6952 
6953 	spdk_put_io_channel(ch);
6954 
6955 	poll_threads();
6956 	spdk_delay_us(1);
6957 	poll_threads();
6958 
6959 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6960 	CU_ASSERT(rc == 0);
6961 
6962 	poll_threads();
6963 	spdk_delay_us(1000);
6964 	poll_threads();
6965 
6966 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
6967 
6968 	g_opts.nvme_ioq_poll_period_us = 0;
6969 	g_opts.bdev_retry_count = 0;
6970 }
6971 
/* This case verifies a fix for a complex race condition in which failover
 * is lost if the fabric connect command times out while the controller is
 * being reset.
 */
6976 static void
6977 test_race_between_reset_and_disconnected(void)
6978 {
6979 	struct spdk_nvme_transport_id trid = {};
6980 	struct spdk_nvme_ctrlr ctrlr = {};
6981 	struct nvme_ctrlr *nvme_ctrlr = NULL;
6982 	struct nvme_path_id *curr_trid;
6983 	struct spdk_io_channel *ch1, *ch2;
6984 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
6985 	int rc;
6986 
6987 	ut_init_trid(&trid);
6988 	TAILQ_INIT(&ctrlr.active_io_qpairs);
6989 
6990 	set_thread(0);
6991 
6992 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
6993 	CU_ASSERT(rc == 0);
6994 
6995 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
6996 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
6997 
6998 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
6999 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7000 
7001 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7002 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7003 
7004 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7005 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7006 
7007 	set_thread(1);
7008 
7009 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7010 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7011 
7012 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7013 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7014 
7015 	/* Reset starts from thread 1. */
7016 	set_thread(1);
7017 
7018 	nvme_ctrlr->resetting = false;
7019 	curr_trid->last_failed_tsc = spdk_get_ticks();
7020 	ctrlr.is_failed = true;
7021 
7022 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7023 	CU_ASSERT(rc == 0);
7024 	CU_ASSERT(nvme_ctrlr->resetting == true);
7025 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7026 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
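
	/* Step the reset state machine one poller at a time: the I/O qpairs are
	 * deleted thread by thread, the admin queue is disconnected and then
	 * reconnected, and finally the qpairs are recreated thread by thread.
	 */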
7027 
7028 	poll_thread_times(0, 3);
7029 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7030 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7031 
7032 	poll_thread_times(0, 1);
7033 	poll_thread_times(1, 1);
7034 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7035 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7036 	CU_ASSERT(ctrlr.is_failed == true);
7037 
7038 	poll_thread_times(1, 1);
7039 	poll_thread_times(0, 1);
7040 	CU_ASSERT(ctrlr.is_failed == false);
7041 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7042 
7043 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7044 	poll_thread_times(0, 2);
7045 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7046 
7047 	poll_thread_times(0, 1);
7048 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7049 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7050 
7051 	poll_thread_times(1, 1);
7052 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7053 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7054 	CU_ASSERT(nvme_ctrlr->resetting == true);
7055 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
7056 
7057 	poll_thread_times(0, 2);
7058 	CU_ASSERT(nvme_ctrlr->resetting == true);
7059 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7060 	poll_thread_times(1, 1);
7061 	CU_ASSERT(nvme_ctrlr->resetting == true);
7062 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7063 
	/* At this point, just one more poll is needed before _bdev_nvme_reset_complete()
	 * is executed.
	 *
	 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric
	 * connect command is executed. If the fabric connect command times out,
	 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until
	 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false.
	 *
	 * Simulate a fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
	 */
7073 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
7074 	CU_ASSERT(rc == -EINPROGRESS);
7075 	CU_ASSERT(nvme_ctrlr->resetting == true);
7076 	CU_ASSERT(nvme_ctrlr->pending_failover == true);
7077 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7078 
7079 	poll_thread_times(0, 1);
7080 
7081 	CU_ASSERT(nvme_ctrlr->resetting == true);
7082 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7083 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
7084 
7085 	poll_threads();
7086 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7087 	poll_threads();
7088 
7089 	CU_ASSERT(nvme_ctrlr->resetting == false);
7090 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7091 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7092 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7093 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7094 
7095 	spdk_put_io_channel(ch2);
7096 
7097 	set_thread(0);
7098 
7099 	spdk_put_io_channel(ch1);
7100 
7101 	poll_threads();
7102 
7103 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7104 	CU_ASSERT(rc == 0);
7105 
7106 	poll_threads();
7107 	spdk_delay_us(1000);
7108 	poll_threads();
7109 
7110 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

7112 static void
7113 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
7114 {
7115 	int *_rc = (int *)cb_arg;
7116 
7117 	SPDK_CU_ASSERT_FATAL(_rc != NULL);
7118 	*_rc = rc;
7119 }
7120 
7121 static void
7122 test_ctrlr_op_rpc(void)
7123 {
7124 	struct spdk_nvme_transport_id trid = {};
7125 	struct spdk_nvme_ctrlr ctrlr = {};
7126 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7127 	struct nvme_path_id *curr_trid;
7128 	struct spdk_io_channel *ch1, *ch2;
7129 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7130 	int ctrlr_op_rc;
7131 	int rc;
7132 
7133 	ut_init_trid(&trid);
7134 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7135 
7136 	set_thread(0);
7137 
7138 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7139 	CU_ASSERT(rc == 0);
7140 
7141 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7142 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7143 
7144 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7145 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7146 
7147 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7148 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7149 
7150 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7151 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7152 
7153 	set_thread(1);
7154 
7155 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7156 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7157 
7158 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7159 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7160 
7161 	/* Reset starts from thread 1. */
7162 	set_thread(1);
7163 
7164 	/* Case 1: ctrlr is already being destructed. */
7165 	nvme_ctrlr->destruct = true;
7166 	ctrlr_op_rc = 0;
7167 
7168 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7169 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7170 
7171 	poll_threads();
7172 
7173 	CU_ASSERT(ctrlr_op_rc == -ENXIO);
7174 
7175 	/* Case 2: reset is in progress. */
7176 	nvme_ctrlr->destruct = false;
7177 	nvme_ctrlr->resetting = true;
7178 	ctrlr_op_rc = 0;
7179 
7180 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7181 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7182 
7183 	poll_threads();
7184 
7185 	CU_ASSERT(ctrlr_op_rc == -EBUSY);
7186 
7187 	/* Case 3: reset completes successfully. */
7188 	nvme_ctrlr->resetting = false;
7189 	curr_trid->last_failed_tsc = spdk_get_ticks();
7190 	ctrlr.is_failed = true;
7191 	ctrlr_op_rc = -1;
7192 
7193 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7194 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7195 
7196 	CU_ASSERT(nvme_ctrlr->resetting == true);
7197 	CU_ASSERT(ctrlr_op_rc == -1);
7198 
7199 	poll_threads();
7200 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7201 	poll_threads();
7202 
7203 	CU_ASSERT(nvme_ctrlr->resetting == false);
7204 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7205 	CU_ASSERT(ctrlr.is_failed == false);
7206 	CU_ASSERT(ctrlr_op_rc == 0);
7207 
7208 	/* Case 4: invalid operation. */
7209 	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
7210 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7211 
7212 	poll_threads();
7213 
7214 	CU_ASSERT(ctrlr_op_rc == -EINVAL);
7215 
7216 	spdk_put_io_channel(ch2);
7217 
7218 	set_thread(0);
7219 
7220 	spdk_put_io_channel(ch1);
7221 
7222 	poll_threads();
7223 
7224 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7225 	CU_ASSERT(rc == 0);
7226 
7227 	poll_threads();
7228 	spdk_delay_us(1000);
7229 	poll_threads();
7230 
7231 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7232 }
7233 
7234 static void
7235 test_bdev_ctrlr_op_rpc(void)
7236 {
7237 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
7238 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
7239 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7240 	struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL;
7241 	struct nvme_path_id *curr_trid1, *curr_trid2;
7242 	struct spdk_io_channel *ch11, *ch12, *ch21, *ch22;
7243 	struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22;
7244 	int ctrlr_op_rc;
7245 	int rc;
7246 
7247 	ut_init_trid(&trid1);
7248 	ut_init_trid2(&trid2);
7249 	TAILQ_INIT(&ctrlr1.active_io_qpairs);
7250 	TAILQ_INIT(&ctrlr2.active_io_qpairs);
7251 	ctrlr1.cdata.cmic.multi_ctrlr = 1;
7252 	ctrlr2.cdata.cmic.multi_ctrlr = 1;
7253 	ctrlr1.cdata.cntlid = 1;
7254 	ctrlr2.cdata.cntlid = 2;
7255 	ctrlr1.adminq.is_connected = true;
7256 	ctrlr2.adminq.is_connected = true;
7257 
7258 	set_thread(0);
7259 
7260 	rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL);
7261 	CU_ASSERT(rc == 0);
7262 
7263 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7264 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7265 
7266 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN);
7267 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
7268 
7269 	curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
7270 	SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL);
7271 
7272 	ch11 = spdk_get_io_channel(nvme_ctrlr1);
7273 	SPDK_CU_ASSERT_FATAL(ch11 != NULL);
7274 
7275 	ctrlr_ch11 = spdk_io_channel_get_ctx(ch11);
7276 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7277 
7278 	set_thread(1);
7279 
7280 	ch12 = spdk_get_io_channel(nvme_ctrlr1);
7281 	SPDK_CU_ASSERT_FATAL(ch12 != NULL);
7282 
7283 	ctrlr_ch12 = spdk_io_channel_get_ctx(ch12);
7284 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7285 
7286 	set_thread(0);
7287 
7288 	rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL);
7289 	CU_ASSERT(rc == 0);
7290 
7291 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN);
7292 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
7293 
7294 	curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
7295 	SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL);
7296 
7297 	ch21 = spdk_get_io_channel(nvme_ctrlr2);
7298 	SPDK_CU_ASSERT_FATAL(ch21 != NULL);
7299 
7300 	ctrlr_ch21 = spdk_io_channel_get_ctx(ch21);
7301 	CU_ASSERT(ctrlr_ch21->qpair != NULL);
7302 
7303 	set_thread(1);
7304 
7305 	ch22 = spdk_get_io_channel(nvme_ctrlr2);
7306 	SPDK_CU_ASSERT_FATAL(ch22 != NULL);
7307 
7308 	ctrlr_ch22 = spdk_io_channel_get_ctx(ch22);
7309 	CU_ASSERT(ctrlr_ch22->qpair != NULL);
7310 
7311 	/* Reset starts from thread 1. */
7312 	set_thread(1);
7313 
7314 	nvme_ctrlr1->resetting = false;
7315 	nvme_ctrlr2->resetting = false;
7316 	curr_trid1->last_failed_tsc = spdk_get_ticks();
7317 	curr_trid2->last_failed_tsc = spdk_get_ticks();
7318 	ctrlr_op_rc = -1;
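
	/* nvme_bdev_ctrlr_op_rpc() applies the operation to each nvme_ctrlr under
	 * the nvme_bdev_ctrlr in sequence. ctrlr1 is reset first, and ctrlr2
	 * starts resetting only after ctrlr1's reset completes.
	 */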
7319 
7320 	nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET,
7321 			       ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7322 
7323 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7324 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7325 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7326 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7327 
7328 	poll_thread_times(0, 3);
7329 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7330 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7331 
7332 	poll_thread_times(0, 1);
7333 	poll_thread_times(1, 1);
7334 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7335 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7336 
7337 	poll_thread_times(1, 1);
7338 	poll_thread_times(0, 1);
7339 	CU_ASSERT(ctrlr1.adminq.is_connected == false);
7340 
7341 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7342 	poll_thread_times(0, 2);
7343 	CU_ASSERT(ctrlr1.adminq.is_connected == true);
7344 
7345 	poll_thread_times(0, 1);
7346 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7347 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7348 
7349 	poll_thread_times(1, 1);
7350 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7351 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7352 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7353 	CU_ASSERT(curr_trid1->last_failed_tsc != 0);
7354 
7355 	poll_thread_times(0, 2);
7356 	poll_thread_times(1, 1);
7357 	poll_thread_times(0, 1);
7358 	poll_thread_times(1, 1);
7359 	poll_thread_times(0, 1);
7360 	poll_thread_times(1, 1);
7361 	poll_thread_times(0, 1);
7362 
7363 	CU_ASSERT(nvme_ctrlr1->resetting == false);
7364 	CU_ASSERT(curr_trid1->last_failed_tsc == 0);
7365 	CU_ASSERT(nvme_ctrlr2->resetting == true);
7366 
7367 	poll_threads();
7368 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7369 	poll_threads();
7370 
7371 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7372 	CU_ASSERT(ctrlr_op_rc == 0);
7373 
7374 	set_thread(1);
7375 
7376 	spdk_put_io_channel(ch12);
7377 	spdk_put_io_channel(ch22);
7378 
7379 	set_thread(0);
7380 
7381 	spdk_put_io_channel(ch11);
7382 	spdk_put_io_channel(ch21);
7383 
7384 	poll_threads();
7385 
7386 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7387 	CU_ASSERT(rc == 0);
7388 
7389 	poll_threads();
7390 	spdk_delay_us(1000);
7391 	poll_threads();
7392 
7393 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7394 }
7395 
7396 static void
7397 test_disable_enable_ctrlr(void)
7398 {
7399 	struct spdk_nvme_transport_id trid = {};
7400 	struct spdk_nvme_ctrlr ctrlr = {};
7401 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7402 	struct nvme_path_id *curr_trid;
7403 	struct spdk_io_channel *ch1, *ch2;
7404 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7405 	int rc;
7406 
7407 	ut_init_trid(&trid);
7408 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7409 	ctrlr.adminq.is_connected = true;
7410 
7411 	set_thread(0);
7412 
7413 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7414 	CU_ASSERT(rc == 0);
7415 
7416 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7417 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7418 
7419 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7420 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7421 
7422 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7423 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7424 
7425 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7426 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7427 
7428 	set_thread(1);
7429 
7430 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7431 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7432 
7433 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7434 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7435 
7436 	/* Disable starts from thread 1. */
7437 	set_thread(1);
7438 
7439 	/* Case 1: ctrlr is already disabled. */
7440 	nvme_ctrlr->disabled = true;
7441 
7442 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7443 	CU_ASSERT(rc == -EALREADY);
7444 
7445 	/* Case 2: ctrlr is already being destructed. */
7446 	nvme_ctrlr->disabled = false;
7447 	nvme_ctrlr->destruct = true;
7448 
7449 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7450 	CU_ASSERT(rc == -ENXIO);
7451 
7452 	/* Case 3: reset is in progress. */
7453 	nvme_ctrlr->destruct = false;
7454 	nvme_ctrlr->resetting = true;
7455 
7456 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7457 	CU_ASSERT(rc == -EBUSY);
7458 
7459 	/* Case 4: disable completes successfully. */
7460 	nvme_ctrlr->resetting = false;
7461 
7462 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7463 	CU_ASSERT(rc == 0);
7464 	CU_ASSERT(nvme_ctrlr->resetting == true);
7465 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7466 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7467 
7468 	poll_thread_times(0, 3);
7469 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7470 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7471 
7472 	poll_thread_times(0, 1);
7473 	poll_thread_times(1, 1);
7474 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7475 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7476 
7477 	poll_thread_times(1, 1);
7478 	poll_thread_times(0, 1);
7479 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7480 	poll_thread_times(1, 1);
7481 	poll_thread_times(0, 1);
7482 	poll_thread_times(1, 1);
7483 	poll_thread_times(0, 1);
7484 	CU_ASSERT(nvme_ctrlr->resetting == false);
7485 	CU_ASSERT(nvme_ctrlr->disabled == true);
7486 
7487 	/* Case 5: enable completes successfully. */
7488 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7489 	CU_ASSERT(rc == 0);
7490 
7491 	CU_ASSERT(nvme_ctrlr->resetting == true);
7492 	CU_ASSERT(nvme_ctrlr->disabled == false);
7493 
7494 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7495 	poll_thread_times(0, 2);
7496 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7497 
7498 	poll_thread_times(0, 1);
7499 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7500 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7501 
7502 	poll_thread_times(1, 1);
7503 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7504 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7505 	CU_ASSERT(nvme_ctrlr->resetting == true);
7506 
7507 	poll_thread_times(0, 2);
7508 	CU_ASSERT(nvme_ctrlr->resetting == true);
7509 	poll_thread_times(1, 1);
7510 	CU_ASSERT(nvme_ctrlr->resetting == true);
7511 	poll_thread_times(0, 1);
7512 	CU_ASSERT(nvme_ctrlr->resetting == false);
7513 
7514 	/* Case 6: ctrlr is already enabled. */
7515 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7516 	CU_ASSERT(rc == -EALREADY);
7517 
7518 	set_thread(0);
7519 
7520 	/* Case 7: disable cancels delayed reconnect. */
7521 	nvme_ctrlr->opts.reconnect_delay_sec = 10;
7522 	ctrlr.fail_reset = true;
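
	/* The failed reset arms the delayed reconnect timer because
	 * reconnect_delay_sec is set. Disabling the controller must cancel that
	 * timer instead of waiting for it to fire.
	 */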
7523 
7524 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7525 	CU_ASSERT(rc == 0);
7526 
7527 	poll_threads();
7528 
7529 	CU_ASSERT(nvme_ctrlr->resetting == false);
7530 	CU_ASSERT(ctrlr.is_failed == false);
7531 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7532 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7533 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
7534 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
7535 
7536 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7537 	CU_ASSERT(rc == 0);
7538 
7539 	CU_ASSERT(nvme_ctrlr->resetting == true);
7540 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
7541 
7542 	poll_threads();
7543 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7544 	poll_threads();
7545 
7546 	CU_ASSERT(nvme_ctrlr->resetting == false);
7547 	CU_ASSERT(nvme_ctrlr->disabled == true);
7548 
7549 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7550 	CU_ASSERT(rc == 0);
7551 
7552 	CU_ASSERT(nvme_ctrlr->resetting == true);
7553 	CU_ASSERT(nvme_ctrlr->disabled == false);
7554 
7555 	poll_threads();
7556 
7557 	CU_ASSERT(nvme_ctrlr->resetting == false);
7558 
7559 	set_thread(1);
7560 
7561 	spdk_put_io_channel(ch2);
7562 
7563 	set_thread(0);
7564 
7565 	spdk_put_io_channel(ch1);
7566 
7567 	poll_threads();
7568 
7569 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7570 	CU_ASSERT(rc == 0);
7571 
7572 	poll_threads();
7573 	spdk_delay_us(1000);
7574 	poll_threads();
7575 
7576 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7577 }
7578 
7579 static void
7580 ut_delete_done(void *ctx, int rc)
7581 {
7582 	int *delete_done_rc = ctx;
7583 	*delete_done_rc = rc;
7584 }
7585 
7586 static void
7587 test_delete_ctrlr_done(void)
7588 {
7589 	struct spdk_nvme_transport_id trid = {};
7590 	struct spdk_nvme_ctrlr ctrlr = {};
7591 	int delete_done_rc = 0xDEADBEEF;
7592 	int rc;
7593 
7594 	ut_init_trid(&trid);
7595 
	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);
7597 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
7598 
7599 	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
7600 	CU_ASSERT(rc == 0);
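
	/* Deletion completes asynchronously. Poll for up to 20 ms for the
	 * ut_delete_done callback to record its result.
	 */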
7601 
7602 	for (int i = 0; i < 20; i++) {
7603 		poll_threads();
7604 		if (delete_done_rc == 0) {
7605 			break;
7606 		}
7607 		spdk_delay_us(1000);
7608 	}
7609 
7610 	CU_ASSERT(delete_done_rc == 0);
7611 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7612 }
7613 
7614 static void
7615 test_ns_remove_during_reset(void)
7616 {
7617 	struct nvme_path_id path = {};
7618 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
7619 	struct spdk_nvme_ctrlr *ctrlr;
7620 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7621 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7622 	struct nvme_ctrlr *nvme_ctrlr;
7623 	const int STRING_SIZE = 32;
7624 	const char *attached_names[STRING_SIZE];
7625 	struct nvme_bdev *bdev;
7626 	struct nvme_ns *nvme_ns;
7627 	union spdk_nvme_async_event_completion event = {};
7628 	struct spdk_nvme_cpl cpl = {};
7629 	int rc;
7630 
7631 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
7632 	ut_init_trid(&path.trid);
7633 
7634 	set_thread(0);
7635 
7636 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
7637 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7638 
7639 	g_ut_attach_ctrlr_status = 0;
7640 	g_ut_attach_bdev_count = 1;
7641 
7642 	opts.multipath = false;
7643 
7644 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
7645 				   attach_ctrlr_done, NULL, &dopts, &opts);
7646 	CU_ASSERT(rc == 0);
7647 
7648 	spdk_delay_us(1000);
7649 	poll_threads();
7650 
7651 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7652 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7653 
7654 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
7655 	CU_ASSERT(nvme_ctrlr != NULL);
7656 
7657 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
7658 	CU_ASSERT(bdev != NULL);
7659 
7660 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
7661 	CU_ASSERT(nvme_ns != NULL);
7662 
7663 	/* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist,
7664 	 * but nvme_ns->ns should be NULL.
7665 	 */
7666 
7667 	CU_ASSERT(ctrlr->ns[0].is_active == true);
7668 	ctrlr->ns[0].is_active = false;
7669 
7670 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7671 	CU_ASSERT(rc == 0);
7672 
7673 	poll_threads();
7674 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7675 	poll_threads();
7676 
7677 	CU_ASSERT(nvme_ctrlr->resetting == false);
7678 	CU_ASSERT(ctrlr->adminq.is_connected == true);
7679 
7680 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7681 	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7682 	CU_ASSERT(nvme_ns->bdev == bdev);
7683 	CU_ASSERT(nvme_ns->ns == NULL);
7684 
7685 	/* Then, async event should fill nvme_ns->ns again. */
7686 
7687 	ctrlr->ns[0].is_active = true;
7688 
7689 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
7690 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
7691 	cpl.cdw0 = event.raw;
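
	/* Deliver a Namespace Attribute Changed AER. aer_cb() rescans the active
	 * namespaces and re-populates nvme_ns->ns.
	 */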
7692 
7693 	aer_cb(nvme_ctrlr, &cpl);
7694 
7695 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7696 	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7697 	CU_ASSERT(nvme_ns->bdev == bdev);
7698 	CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]);
7699 
7700 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7701 	CU_ASSERT(rc == 0);
7702 
7703 	poll_threads();
7704 	spdk_delay_us(1000);
7705 	poll_threads();
7706 
7707 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7708 }
7709 
7710 static void
7711 test_io_path_is_current(void)
7712 {
7713 	struct nvme_bdev_channel nbdev_ch = {
7714 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
7715 	};
7716 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
7717 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
7718 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
7719 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, },
7720 	nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
7721 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {};
7722 	struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
7723 	struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
7724 	struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, };
7725 	struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, };
7726 	struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, };
7727 	struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, };
7728 	struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
7729 	struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
7730 	struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
7731 
	/* io_path1 is being deleted. */
7733 	io_path1.nbdev_ch = NULL;
7734 
7735 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == false);
7736 
7737 	io_path1.nbdev_ch = &nbdev_ch;
7738 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
7739 	io_path2.nbdev_ch = &nbdev_ch;
7740 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
7741 	io_path3.nbdev_ch = &nbdev_ch;
7742 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
7743 
7744 	/* active/active: io_path is current if it is available and ANA optimized. */
7745 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7746 
7747 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7748 
7749 	/* active/active: io_path is not current if it is disconnected even if it is
7750 	 * ANA optimized.
7751 	 */
7752 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7753 
7754 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7755 
7756 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7757 
7758 	/* active/passive: io_path is current if it is available and cached.
7759 	 * (only ANA optimized path is cached for active/passive.)
7760 	 */
7761 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7762 	nbdev_ch.current_io_path = &io_path2;
7763 
7764 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7765 
	/* active/passive: io_path is not current if it is disconnected even if it is cached */
7767 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7768 
7769 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7770 
7771 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7772 
7773 	/* active/active and active/passive: io_path is not current if it is ANA inaccessible. */
7774 	nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
7775 
7776 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7777 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7778 
7779 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7780 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7781 
7782 	/* active/active: non-optimized path is current only if there is no optimized path. */
7783 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7784 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7785 
7786 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7787 
7788 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7789 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7790 
7791 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7792 
	/* active/passive: io_path is current if it is the first one found when there is no optimized path. */
7794 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7795 	nbdev_ch.current_io_path = NULL;
7796 
7797 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == true);
7798 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7799 	CU_ASSERT(nvme_io_path_is_current(&io_path3) == false);
7800 }
7801 
7802 static void
7803 test_bdev_reset_abort_io(void)
7804 {
7805 	struct spdk_nvme_transport_id trid = {};
7806 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
7807 	struct spdk_nvme_ctrlr *ctrlr;
7808 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7809 	struct nvme_ctrlr *nvme_ctrlr;
7810 	const int STRING_SIZE = 32;
7811 	const char *attached_names[STRING_SIZE];
7812 	struct nvme_bdev *bdev;
7813 	struct spdk_bdev_io *write_io, *read_io, *reset_io;
7814 	struct spdk_io_channel *ch1, *ch2;
7815 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
7816 	struct nvme_io_path *io_path1, *io_path2;
7817 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
7818 	int rc;
7819 
7820 	g_opts.bdev_retry_count = -1;
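
	/* A negative bdev_retry_count means failed I/O is retried without limit,
	 * so a queued I/O stays queued until the bdev reset aborts it explicitly.
	 */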
7821 
7822 	ut_init_trid(&trid);
7823 
7824 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
7825 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7826 
7827 	g_ut_attach_ctrlr_status = 0;
7828 	g_ut_attach_bdev_count = 1;
7829 
7830 	set_thread(1);
7831 
7832 	opts.ctrlr_loss_timeout_sec = -1;
7833 	opts.reconnect_delay_sec = 1;
7834 	opts.multipath = false;
7835 
7836 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
7837 				   attach_ctrlr_done, NULL, &dopts, &opts);
7838 	CU_ASSERT(rc == 0);
7839 
7840 	spdk_delay_us(1000);
7841 	poll_threads();
7842 
7843 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7844 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7845 
7846 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
7847 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
7848 
7849 	set_thread(0);
7850 
7851 	ch1 = spdk_get_io_channel(bdev);
7852 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7853 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
7854 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
7855 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
7856 	nvme_qpair1 = io_path1->qpair;
7857 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
7858 
7859 	set_thread(1);
7860 
7861 	ch2 = spdk_get_io_channel(bdev);
7862 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7863 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
7864 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
7865 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
7866 	nvme_qpair2 = io_path2->qpair;
7867 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
7868 
7869 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch1);
7870 	ut_bdev_io_set_buf(write_io);
7871 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
7872 
7873 	read_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_READ, bdev, ch1);
7874 	ut_bdev_io_set_buf(read_io);
7875 	read_io->internal.ch = (struct spdk_bdev_channel *)ch1;
7876 
7877 	reset_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
7878 
	/* If a qpair is disconnected, it is freed and then reconnected by resetting
	 * the corresponding nvme_ctrlr. An I/O submitted while the nvme_ctrlr is
	 * resetting should be queued.
7882 	 */
7883 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7884 
7885 	poll_thread_times(0, 3);
7886 
7887 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7888 	CU_ASSERT(nvme_ctrlr->resetting == true);
7889 
7890 	set_thread(0);
7891 
7892 	write_io->internal.f.in_submit_request = true;
7893 
7894 	bdev_nvme_submit_request(ch1, write_io);
7895 
7896 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
7897 	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
7898 
7899 	set_thread(1);
7900 
	/* Submit a reset request to the bdev while the nvme_ctrlr is resetting.
7902 	 * Further I/O queueing should be disabled and queued I/Os should be aborted.
7903 	 * Verify these behaviors.
7904 	 */
7905 	reset_io->internal.f.in_submit_request = true;
7906 
7907 	bdev_nvme_submit_request(ch2, reset_io);
7908 
7909 	poll_thread_times(0, 1);
7910 	poll_thread_times(1, 2);
7911 
7912 	CU_ASSERT(nbdev_ch1->resetting == true);
7913 
7914 	/* qpair1 should be still disconnected. */
7915 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7916 
7917 	set_thread(0);
7918 
7919 	read_io->internal.f.in_submit_request = true;
7920 
7921 	bdev_nvme_submit_request(ch1, read_io);
7922 
7923 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7924 
7925 	poll_thread_times(0, 1);
7926 
7927 	/* The I/O which was submitted during bdev_reset should fail immediately. */
7928 	CU_ASSERT(read_io->internal.f.in_submit_request == false);
7929 	CU_ASSERT(read_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
7930 
7931 	poll_threads();
7932 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7933 	poll_threads();
7934 
7935 	/* The completion of bdev_reset should ensure queued I/O is aborted. */
7936 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
7937 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
7938 
7939 	/* The reset request itself should complete with success. */
7940 	CU_ASSERT(reset_io->internal.f.in_submit_request == false);
7941 	CU_ASSERT(reset_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
7942 
7943 	set_thread(0);
7944 
7945 	spdk_put_io_channel(ch1);
7946 
7947 	set_thread(1);
7948 
7949 	spdk_put_io_channel(ch2);
7950 
7951 	poll_threads();
7952 
7953 	set_thread(0);
7954 
7955 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7956 	CU_ASSERT(rc == 0);
7957 
7958 	poll_threads();
7959 	spdk_delay_us(1000);
7960 	poll_threads();
7961 
7962 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7963 
7964 	free(write_io);
7965 	free(read_io);
7966 	free(reset_io);
7967 
7968 	g_opts.bdev_retry_count = 0;
7969 }
7970 
7971 int
7972 main(int argc, char **argv)
7973 {
7974 	CU_pSuite	suite = NULL;
7975 	unsigned int	num_failures;
7976 
7977 	CU_initialize_registry();
7978 
7979 	suite = CU_add_suite("nvme", NULL, NULL);
7980 
7981 	CU_ADD_TEST(suite, test_create_ctrlr);
7982 	CU_ADD_TEST(suite, test_reset_ctrlr);
7983 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
7984 	CU_ADD_TEST(suite, test_failover_ctrlr);
7985 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
7986 	CU_ADD_TEST(suite, test_pending_reset);
7987 	CU_ADD_TEST(suite, test_attach_ctrlr);
7988 	CU_ADD_TEST(suite, test_aer_cb);
7989 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
7990 	CU_ADD_TEST(suite, test_add_remove_trid);
7991 	CU_ADD_TEST(suite, test_abort);
7992 	CU_ADD_TEST(suite, test_get_io_qpair);
7993 	CU_ADD_TEST(suite, test_bdev_unregister);
7994 	CU_ADD_TEST(suite, test_compare_ns);
7995 	CU_ADD_TEST(suite, test_init_ana_log_page);
7996 	CU_ADD_TEST(suite, test_get_memory_domains);
7997 	CU_ADD_TEST(suite, test_reconnect_qpair);
7998 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
7999 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
8000 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
8001 	CU_ADD_TEST(suite, test_admin_path);
8002 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
8003 	CU_ADD_TEST(suite, test_find_io_path);
8004 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
8005 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
8006 	CU_ADD_TEST(suite, test_retry_io_count);
8007 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
8008 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
8009 	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
8010 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
8011 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
8012 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
8013 	CU_ADD_TEST(suite, test_fail_path);
8014 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
8015 	CU_ADD_TEST(suite, test_ana_transition);
8016 	CU_ADD_TEST(suite, test_set_preferred_path);
8017 	CU_ADD_TEST(suite, test_find_next_io_path);
8018 	CU_ADD_TEST(suite, test_find_io_path_min_qd);
8019 	CU_ADD_TEST(suite, test_disable_auto_failback);
8020 	CU_ADD_TEST(suite, test_set_multipath_policy);
8021 	CU_ADD_TEST(suite, test_uuid_generation);
8022 	CU_ADD_TEST(suite, test_retry_io_to_same_path);
8023 	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
8024 	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
8025 	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
8026 	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
8027 	CU_ADD_TEST(suite, test_delete_ctrlr_done);
8028 	CU_ADD_TEST(suite, test_ns_remove_during_reset);
8029 	CU_ADD_TEST(suite, test_io_path_is_current);
8030 	CU_ADD_TEST(suite, test_bdev_reset_abort_io);
8031 
8032 	allocate_threads(3);
8033 	set_thread(0);
8034 	bdev_nvme_library_init();
8035 	init_accel();
8036 
8037 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
8038 
8039 	set_thread(0);
8040 	bdev_nvme_library_fini();
8041 	fini_accel();
8042 	free_threads();
8043 
8044 	CU_cleanup_registry();
8045 
8046 	return num_failures;
8047 }
8048