/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

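/*
 * This test compiles the code under test and its scaffolding into a single
 * translation unit: ut_multithread.c provides the multi-thread test
 * environment, bdev_nvme.c is the module under test (included as a .c file so
 * that its static functions are visible to the tests), and json_mock.c and
 * bdev_mdns_client.c satisfy the remaining dependencies.
 */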
#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

static void *g_accel_p = (void *)0xdeadbeaf;

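/*
 * DEFINE_STUB(fn, ret, args, val) and DEFINE_STUB_V(fn, args) come from
 * SPDK's unit test mock framework and generate trivial implementations,
 * roughly (a simplified sketch, not the exact macro expansion):
 *
 *	ret fn args { return val; }
 *
 * They satisfy the linker for symbols that bdev_nvme.c calls but that these
 * tests do not need to model in detail.
 */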
DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_numa_id, int32_t, (struct spdk_nvme_ctrlr *ctrlr),
	    SPDK_ENV_NUMA_ID_ANY);

DEFINE_STUB(spdk_nvme_qpair_get_id, uint16_t, (struct spdk_nvme_qpair *qpair), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);
DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL);
DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_nvme_scan_attached, int, (const struct spdk_nvme_transport_id *trid), 0);

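/*
 * spdk_nvme_ctrlr_get_memory_domains() needs more than a canned return value,
 * so it is implemented by hand: when the mock return value
 * (ut_spdk_nvme_ctrlr_get_memory_domains, declared by DEFINE_RETURN_MOCK
 * above) is positive, that many fake domain pointers are filled in before
 * HANDLE_RETURN_MOCK returns the mocked value.
 */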
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

#define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"

static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN};

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns),
	    SPDK_NVME_16B_GUARD_PI);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
	     struct iovec *dst_iovs, uint32_t dst_iovcnt,
	     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt,
	     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
	     spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_nvme_qpair_authenticate, int,
	    (struct spdk_nvme_qpair *qpair, spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
DEFINE_STUB(spdk_nvme_ctrlr_authenticate, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
DEFINE_STUB(spdk_nvme_ctrlr_set_keys, int,
	    (struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ctrlr_key_opts *opts), 0);

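/*
 * Minimal stand-ins for the NVMe driver's otherwise opaque structures. Only
 * the fields that this test manipulates are defined, so the layouts do not
 * match the real driver's.
 */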
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint8_t				failure_reason;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	bool				is_removed;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

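/*
 * The trid helpers below describe three paths to the same subsystem: the
 * subnqn and trsvcid are identical and only the traddr differs, so the trids
 * compare as distinct.
 */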
static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

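/*
 * Fake controller lifecycle: ut_attach_ctrlr() creates a controller and
 * queues it on g_ut_init_ctrlrs, spdk_nvme_probe_poll_async() moves it to
 * g_ut_attached_ctrlrs and invokes the attach callback, and
 * spdk_nvme_detach() finally removes and frees it via ut_detach_ctrlr().
 */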
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN);
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint16_t
spdk_nvme_ctrlr_get_id(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->cdata.cntlid;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

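/*
 * Builds a fake ANA log page: a header whose descriptor count is num_ns,
 * followed by one group descriptor per active namespace, each containing a
 * single NSID equal to its ANA group ID.
 */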
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_read_ext_called;
int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_read_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_write_ext_called;
int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_write_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

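/*
 * Mirrors the real poll group behavior closely enough for these tests:
 * disconnected qpairs get disconnected_qpair_cb, a connected qpair with a
 * failure reason is disconnected and counted as busy, and completions from
 * the remaining connected qpairs are summed.
 */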
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.f.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

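/*
 * Test cases begin here. bdev_nvme_delete() completes asynchronously, so the
 * tests poll the threads and advance the test clock before checking that the
 * ctrlr is actually gone.
 */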
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

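/*
 * Walks the reset state machine step by step with poll_thread_times(): the
 * I/O qpair on each thread is deleted, the admin qpair is disconnected and
 * reconnected, and then the I/O qpairs are recreated one thread at a time
 * before the resetting flag is finally cleared.
 */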
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing the ctrlr while it is being reset; destruction will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are two channels and destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test the single-trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test the two-trid case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

1802 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
1803  *
1804  * An nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
1805  * trid1 was lost, and resetting the ctrlr failed repeatedly before failover from
1806  * trid1 to trid2 started. While processing the failed reset, trid3 was added. trid1
1807  * should have remained active, i.e., the head of the list, until the failover
1808  * completed. However, trid3 was inserted at the head of the list by mistake.
1809  *
1810  * I/O qpairs have a shorter polling period than the admin qpair. When a connection
1811  * is lost, an I/O qpair may detect the error earlier than the admin qpair. An I/O
1812  * qpair error invokes reset ctrlr, and an admin qpair error invokes failover ctrlr.
1813  * Hence reset ctrlr may be executed repeatedly before failover starts, so this bug is real.
1814  *
1815  * The following test verifies the fix.
1816  */
1817 static void
1818 test_race_between_failover_and_add_secondary_trid(void)
1819 {
1820 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1821 	struct spdk_nvme_ctrlr ctrlr = {};
1822 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1823 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1824 	struct spdk_io_channel *ch1, *ch2;
1825 	int rc;
1826 
1827 	ut_init_trid(&trid1);
1828 	ut_init_trid2(&trid2);
1829 	ut_init_trid3(&trid3);
1830 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1831 
1832 	set_thread(0);
1833 
1834 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1835 	CU_ASSERT(rc == 0);
1836 
1837 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1838 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1839 
1840 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1841 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1842 
1843 	set_thread(1);
1844 
1845 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1846 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1847 
1848 	set_thread(0);
1849 
1850 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1851 	CU_ASSERT(rc == 0);
1852 
1853 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1854 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1855 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1856 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1857 	path_id2 = TAILQ_NEXT(path_id1, link);
1858 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1859 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1860 
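	/* Make the controller reset fail so that path1 is marked as failed but
	 * remains the active path.
	 */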
1861 	ctrlr.fail_reset = true;
1862 
1863 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1864 	CU_ASSERT(rc == 0);
1865 
1866 	poll_threads();
1867 
1868 	CU_ASSERT(path_id1->last_failed_tsc != 0);
1869 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1870 
1871 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1872 	CU_ASSERT(rc == 0);
1873 
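	/* Add trid3 while the failed reset is still being processed. With the fix,
	 * trid3 must be appended after trid2 rather than inserted at the head of
	 * the list, keeping trid1 active.
	 */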
1874 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1875 	CU_ASSERT(rc == 0);
1876 
1877 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1878 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1879 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1880 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1881 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1882 	path_id3 = TAILQ_NEXT(path_id2, link);
1883 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1884 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1885 
1886 	poll_threads();
1887 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1888 	poll_threads();
1889 
1890 	spdk_put_io_channel(ch1);
1891 
1892 	set_thread(1);
1893 
1894 	spdk_put_io_channel(ch2);
1895 
1896 	poll_threads();
1897 
1898 	set_thread(0);
1899 
1900 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1901 	CU_ASSERT(rc == 0);
1902 
1903 	poll_threads();
1904 	spdk_delay_us(1000);
1905 	poll_threads();
1906 
1907 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1908 }
1909 
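/* Attach callback shared by the tests below. It checks the attach status and
 * the bdev count against the expectations stored in the g_ut_attach_* globals.
 */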
1910 static void
1911 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1912 {
1913 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1914 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1915 }
1916 
1917 static void
1918 test_pending_reset(void)
1919 {
1920 	struct spdk_nvme_transport_id trid = {};
1921 	struct spdk_nvme_ctrlr *ctrlr;
1922 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
1923 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1924 	const int STRING_SIZE = 32;
1925 	const char *attached_names[STRING_SIZE];
1926 	struct nvme_bdev *bdev;
1927 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1928 	struct spdk_io_channel *ch1, *ch2;
1929 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1930 	struct nvme_io_path *io_path1, *io_path2;
1931 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1932 	int rc;
1933 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
1934 
1935 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
1936 	bdev_opts.multipath = false;
1937 
1938 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1939 	ut_init_trid(&trid);
1940 
1941 	set_thread(0);
1942 
1943 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1944 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1945 
1946 	g_ut_attach_ctrlr_status = 0;
1947 	g_ut_attach_bdev_count = 1;
1948 
1949 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1950 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
1951 	CU_ASSERT(rc == 0);
1952 
1953 	spdk_delay_us(1000);
1954 	poll_threads();
1955 
1956 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1957 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1958 
1959 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1960 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1961 
1962 	ch1 = spdk_get_io_channel(bdev);
1963 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1964 
1965 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1966 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1967 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1968 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1969 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1970 
1971 	set_thread(1);
1972 
1973 	ch2 = spdk_get_io_channel(bdev);
1974 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1975 
1976 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1977 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1978 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1979 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1980 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1981 
1982 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1983 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1984 
1985 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1986 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1987 
1988 	/* The first reset request is submitted on thread 1, and the second reset request
1989 	 * is submitted on thread 0 while processing the first request.
1990 	 */
1991 	bdev_nvme_submit_request(ch2, first_bdev_io);
1992 
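	/* Poll each thread just enough that the first reset starts but does not
	 * complete yet.
	 */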
1993 	poll_thread_times(0, 1);
1994 	poll_thread_times(1, 2);
1995 
1996 	CU_ASSERT(nvme_ctrlr->resetting == true);
1997 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1998 
1999 	set_thread(0);
2000 
2001 	bdev_nvme_submit_request(ch1, second_bdev_io);
2002 
2003 	poll_thread_times(0, 1);
2004 	poll_thread_times(1, 1);
2005 	poll_thread_times(0, 2);
2006 	poll_thread_times(1, 1);
2007 	poll_thread_times(0, 1);
2008 
2009 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io);
2010 
2011 	poll_threads();
2012 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2013 	poll_threads();
2014 
2015 	CU_ASSERT(nvme_ctrlr->resetting == false);
2016 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2017 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2018 
2019 	/* The first reset request is submitted on thread 1, and the second reset request
2020 	 * is submitted on thread 0 while processing the first request.
2021 	 *
2022 	 * The difference from the above scenario is that the controller reset fails
2023 	 * while processing the first request. Hence both reset requests should fail.
2024 	 */
2025 	set_thread(1);
2026 
2027 	bdev_nvme_submit_request(ch2, first_bdev_io);
2028 
2029 	poll_thread_times(0, 1);
2030 	poll_thread_times(1, 2);
2031 
2032 	CU_ASSERT(nvme_ctrlr->resetting == true);
2033 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
2034 
2035 	set_thread(0);
2036 
2037 	bdev_nvme_submit_request(ch1, second_bdev_io);
2038 
2039 	poll_thread_times(0, 1);
2040 	poll_thread_times(1, 1);
2041 	poll_thread_times(0, 2);
2042 	poll_thread_times(1, 1);
2043 	poll_thread_times(0, 1);
2044 
2045 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io);
2046 
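	/* Make the controller reset fail so that both reset requests complete with
	 * failure.
	 */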
2047 	ctrlr->fail_reset = true;
2048 
2049 	poll_threads();
2050 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2051 	poll_threads();
2052 
2053 	CU_ASSERT(nvme_ctrlr->resetting == false);
2054 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2055 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2056 
2057 	spdk_put_io_channel(ch1);
2058 
2059 	set_thread(1);
2060 
2061 	spdk_put_io_channel(ch2);
2062 
2063 	poll_threads();
2064 
2065 	set_thread(0);
2066 
2067 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2068 	CU_ASSERT(rc == 0);
2069 
2070 	poll_threads();
2071 	spdk_delay_us(1000);
2072 	poll_threads();
2073 
2074 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2075 
2076 	free(first_bdev_io);
2077 	free(second_bdev_io);
2078 }
2079 
2080 static void
2081 test_attach_ctrlr(void)
2082 {
2083 	struct spdk_nvme_transport_id trid = {};
2084 	struct spdk_nvme_ctrlr *ctrlr;
2085 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2086 	struct nvme_ctrlr *nvme_ctrlr;
2087 	const int STRING_SIZE = 32;
2088 	const char *attached_names[STRING_SIZE];
2089 	struct nvme_bdev *nbdev;
2090 	int rc;
2091 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2092 
2093 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2094 	bdev_opts.multipath = false;
2095 
2096 	set_thread(0);
2097 
2098 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2099 	ut_init_trid(&trid);
2100 
2101 	/* If the ctrlr is failed, no nvme_ctrlr is created. The failed ctrlr is
2102 	 * removed by probe polling.
2103 	 */
2104 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2105 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2106 
2107 	ctrlr->is_failed = true;
2108 	g_ut_attach_ctrlr_status = -EIO;
2109 	g_ut_attach_bdev_count = 0;
2110 
2111 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2112 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2113 	CU_ASSERT(rc == 0);
2114 
2115 	spdk_delay_us(1000);
2116 	poll_threads();
2117 
2118 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2119 
2120 	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
2121 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2122 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2123 
2124 	g_ut_attach_ctrlr_status = 0;
2125 
2126 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2127 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2128 	CU_ASSERT(rc == 0);
2129 
2130 	spdk_delay_us(1000);
2131 	poll_threads();
2132 
2133 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2134 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2135 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2136 
2137 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2138 	CU_ASSERT(rc == 0);
2139 
2140 	poll_threads();
2141 	spdk_delay_us(1000);
2142 	poll_threads();
2143 
2144 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2145 
2146 	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
2147 	 * one nvme_bdev is created.
2148 	 */
2149 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2150 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2151 
2152 	g_ut_attach_bdev_count = 1;
2153 
2154 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2155 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2156 	CU_ASSERT(rc == 0);
2157 
2158 	spdk_delay_us(1000);
2159 	poll_threads();
2160 
2161 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2162 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2163 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2164 
2165 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2166 	attached_names[0] = NULL;
2167 
2168 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2169 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2170 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2171 
2172 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2173 	CU_ASSERT(rc == 0);
2174 
2175 	poll_threads();
2176 	spdk_delay_us(1000);
2177 	poll_threads();
2178 
2179 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2180 
2181 	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespaces is
2182 	 * created because creating the nvme_bdev fails.
2183 	 */
2184 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2185 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2186 
2187 	g_ut_register_bdev_status = -EINVAL;
2188 	g_ut_attach_bdev_count = 0;
2189 
2190 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2191 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2192 	CU_ASSERT(rc == 0);
2193 
2194 	spdk_delay_us(1000);
2195 	poll_threads();
2196 
2197 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2198 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2199 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2200 
2201 	CU_ASSERT(attached_names[0] == NULL);
2202 
2203 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2204 	CU_ASSERT(rc == 0);
2205 
2206 	poll_threads();
2207 	spdk_delay_us(1000);
2208 	poll_threads();
2209 
2210 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2211 
2212 	g_ut_register_bdev_status = 0;
2213 }
2214 
2215 static void
2216 test_aer_cb(void)
2217 {
2218 	struct spdk_nvme_transport_id trid = {};
2219 	struct spdk_nvme_ctrlr *ctrlr;
2220 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2221 	struct nvme_ctrlr *nvme_ctrlr;
2222 	struct nvme_bdev *bdev;
2223 	const int STRING_SIZE = 32;
2224 	const char *attached_names[STRING_SIZE];
2225 	union spdk_nvme_async_event_completion event = {};
2226 	struct spdk_nvme_cpl cpl = {};
2227 	int rc;
2228 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2229 
2230 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2231 	bdev_opts.multipath = false;
2232 
2233 	set_thread(0);
2234 
2235 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2236 	ut_init_trid(&trid);
2237 
2238 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd, and
2239 	 * 4th namespaces are populated.
2240 	 */
2241 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2242 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2243 
2244 	ctrlr->ns[0].is_active = false;
2245 
2246 	g_ut_attach_ctrlr_status = 0;
2247 	g_ut_attach_bdev_count = 3;
2248 
2249 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2250 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2251 	CU_ASSERT(rc == 0);
2252 
2253 	spdk_delay_us(1000);
2254 	poll_threads();
2255 
2256 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2257 	poll_threads();
2258 
2259 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2260 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2261 
2262 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2263 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2264 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2265 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2266 
2267 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2268 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2269 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2270 
2271 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace, and
2272 	 * change the size of the 4th namespace.
2273 	 */
2274 	ctrlr->ns[0].is_active = true;
2275 	ctrlr->ns[2].is_active = false;
2276 	ctrlr->nsdata[3].nsze = 2048;
2277 
2278 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2279 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2280 	cpl.cdw0 = event.raw;
2281 
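	/* Deliver the AER. aer_cb() should rescan the namespaces and resize the
	 * bdev of the 4th namespace.
	 */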
2282 	aer_cb(nvme_ctrlr, &cpl);
2283 
2284 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2285 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2286 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2287 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2288 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2289 
2290 	/* Change ANA state of active namespaces. */
2291 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2292 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2293 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2294 
2295 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2296 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2297 	cpl.cdw0 = event.raw;
2298 
2299 	aer_cb(nvme_ctrlr, &cpl);
2300 
2301 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2302 	poll_threads();
2303 
2304 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2305 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2306 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2307 
2308 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2309 	CU_ASSERT(rc == 0);
2310 
2311 	poll_threads();
2312 	spdk_delay_us(1000);
2313 	poll_threads();
2314 
2315 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2316 }
2317 
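/* Submit a bdev I/O of the given type and verify that it completes
 * successfully once the qpair is polled.
 */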
2318 static void
2319 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2320 			enum spdk_bdev_io_type io_type)
2321 {
2322 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2323 	struct nvme_io_path *io_path;
2324 	struct spdk_nvme_qpair *qpair;
2325 
2326 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2327 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2328 	qpair = io_path->qpair->qpair;
2329 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2330 
2331 	bdev_io->type = io_type;
2332 	bdev_io->internal.f.in_submit_request = true;
2333 
2334 	bdev_nvme_submit_request(ch, bdev_io);
2335 
2336 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2337 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2338 
2339 	poll_threads();
2340 
2341 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2342 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2343 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2344 }
2345 
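/* Submit a bdev I/O type that issues no NVMe command, e.g. flush. It should
 * complete immediately without consuming a qpair request.
 */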
2346 static void
2347 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2348 		   enum spdk_bdev_io_type io_type)
2349 {
2350 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2351 	struct nvme_io_path *io_path;
2352 	struct spdk_nvme_qpair *qpair;
2353 
2354 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2355 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2356 	qpair = io_path->qpair->qpair;
2357 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2358 
2359 	bdev_io->type = io_type;
2360 	bdev_io->internal.f.in_submit_request = true;
2361 
2362 	bdev_nvme_submit_request(ch, bdev_io);
2363 
2364 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2365 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2366 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2367 }
2368 
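/* Submit a fused compare-and-write. It is translated into two NVMe commands,
 * a compare followed by a write, and both must complete for the bdev I/O to
 * succeed.
 */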
2369 static void
2370 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2371 {
2372 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2373 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2374 	struct ut_nvme_req *req;
2375 	struct nvme_io_path *io_path;
2376 	struct spdk_nvme_qpair *qpair;
2377 
2378 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2379 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2380 	qpair = io_path->qpair->qpair;
2381 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2382 
2383 	/* Only the compare-and-write fused command is tested for now. */
2384 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2385 	bdev_io->internal.f.in_submit_request = true;
2386 
2387 	bdev_nvme_submit_request(ch, bdev_io);
2388 
2389 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2390 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2391 	CU_ASSERT(bio->first_fused_submitted == true);
2392 
2393 	/* First outstanding request is compare operation. */
2394 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2395 	SPDK_CU_ASSERT_FATAL(req != NULL);
2396 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2397 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2398 
2399 	poll_threads();
2400 
2401 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2402 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2403 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2404 }
2405 
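/* Submit an NVMe admin passthru command. The admin command is processed on
 * the thread that created the ctrlr (thread 1 in this test), and the
 * completion is then delivered back to the submitting thread (thread 0).
 */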
2406 static void
2407 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2408 			 struct spdk_nvme_ctrlr *ctrlr)
2409 {
2410 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2411 	bdev_io->internal.f.in_submit_request = true;
2412 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2413 
2414 	bdev_nvme_submit_request(ch, bdev_io);
2415 
2416 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2417 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2418 
2419 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2420 	poll_thread_times(1, 1);
2421 
2422 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2423 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2424 
2425 	poll_thread_times(0, 1);
2426 
2427 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2428 }
2429 
2430 static void
2431 test_submit_nvme_cmd(void)
2432 {
2433 	struct spdk_nvme_transport_id trid = {};
2434 	struct spdk_nvme_ctrlr *ctrlr;
2435 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2436 	struct nvme_ctrlr *nvme_ctrlr;
2437 	const int STRING_SIZE = 32;
2438 	const char *attached_names[STRING_SIZE];
2439 	struct nvme_bdev *bdev;
2440 	struct spdk_bdev_io *bdev_io;
2441 	struct spdk_io_channel *ch;
2442 	int rc;
2443 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2444 
2445 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2446 	bdev_opts.multipath = false;
2447 
2448 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2449 	ut_init_trid(&trid);
2450 
2451 	set_thread(1);
2452 
2453 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2454 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2455 
2456 	g_ut_attach_ctrlr_status = 0;
2457 	g_ut_attach_bdev_count = 1;
2458 
2459 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2460 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2461 	CU_ASSERT(rc == 0);
2462 
2463 	spdk_delay_us(1000);
2464 	poll_threads();
2465 
2466 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2467 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2468 
2469 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2470 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2471 
2472 	set_thread(0);
2473 
2474 	ch = spdk_get_io_channel(bdev);
2475 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2476 
2477 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2478 
2479 	bdev_io->u.bdev.iovs = NULL;
2480 
2481 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2482 
2483 	ut_bdev_io_set_buf(bdev_io);
2484 
2485 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2486 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2487 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2488 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2489 
2490 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2491 
2492 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2493 
2494 	/* Verify that the ext NVMe API is called when data is described by a memory domain. */
2495 	g_ut_read_ext_called = false;
2496 	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
2497 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2498 	CU_ASSERT(g_ut_read_ext_called == true);
2499 	g_ut_read_ext_called = false;
2500 	bdev_io->u.bdev.memory_domain = NULL;
2501 
2502 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2503 
2504 	free(bdev_io);
2505 
2506 	spdk_put_io_channel(ch);
2507 
2508 	poll_threads();
2509 
2510 	set_thread(1);
2511 
2512 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2513 	CU_ASSERT(rc == 0);
2514 
2515 	poll_threads();
2516 	spdk_delay_us(1000);
2517 	poll_threads();
2518 
2519 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2520 }
2521 
2522 static void
2523 test_add_remove_trid(void)
2524 {
2525 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2526 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2527 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2528 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2529 	const int STRING_SIZE = 32;
2530 	const char *attached_names[STRING_SIZE];
2531 	struct nvme_path_id *ctrid;
2532 	int rc;
2533 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2534 
2535 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2536 	bdev_opts.multipath = false;
2537 
2538 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2539 	ut_init_trid(&path1.trid);
2540 	ut_init_trid2(&path2.trid);
2541 	ut_init_trid3(&path3.trid);
2542 
2543 	set_thread(0);
2544 
2545 	g_ut_attach_ctrlr_status = 0;
2546 	g_ut_attach_bdev_count = 0;
2547 
2548 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2549 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2550 
2551 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2552 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2553 	CU_ASSERT(rc == 0);
2554 
2555 	spdk_delay_us(1000);
2556 	poll_threads();
2557 
2558 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2559 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2560 
2561 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2562 
2563 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2564 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2565 
2566 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2567 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2568 	CU_ASSERT(rc == 0);
2569 
2570 	spdk_delay_us(1000);
2571 	poll_threads();
2572 
2573 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2574 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2575 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2576 			break;
2577 		}
2578 	}
2579 	CU_ASSERT(ctrid != NULL);
2580 
2581 	/* trid3 is not in the registered list. */
2582 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2583 	CU_ASSERT(rc == -ENXIO);
2584 
2585 	/* trid2 is not used, so it is simply removed. */
2586 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2587 	CU_ASSERT(rc == 0);
2588 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2589 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2590 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2591 	}
2592 
2593 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2594 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2595 
2596 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2597 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2598 	CU_ASSERT(rc == 0);
2599 
2600 	spdk_delay_us(1000);
2601 	poll_threads();
2602 
2603 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2604 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2605 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2606 			break;
2607 		}
2608 	}
2609 	CU_ASSERT(ctrid != NULL);
2610 
2611 	/* Forcefully mark path3 as failed by setting its last_failed_tsc to non-zero.
2612 	 * If we add path2 again, path2 should be inserted between path1 and path3.
2613 	 * Then we remove path2. It is not used, so it is simply removed.
2614 	 */
2615 	ctrid->last_failed_tsc = spdk_get_ticks() + 1;
2616 
2617 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2618 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2619 
2620 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2621 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2622 	CU_ASSERT(rc == 0);
2623 
2624 	spdk_delay_us(1000);
2625 	poll_threads();
2626 
2627 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2628 
2629 	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
2630 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2631 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);
2632 
2633 	ctrid = TAILQ_NEXT(ctrid, link);
2634 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2635 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);
2636 
2637 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2638 	CU_ASSERT(rc == 0);
2639 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2640 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2641 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2642 	}
2643 
2644 	/* path1 is currently used and path3 is an alternative path.
2645 	 * If we remove path1, the active path changes to path3.
2646 	 */
2647 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
2648 	CU_ASSERT(rc == 0);
2649 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2650 	CU_ASSERT(nvme_ctrlr->resetting == true);
2651 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2652 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2653 	}
2654 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2655 
2656 	poll_threads();
2657 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2658 	poll_threads();
2659 
2660 	CU_ASSERT(nvme_ctrlr->resetting == false);
2661 
2662 	/* path3 is the current and only path. If we remove path3, the corresponding
2663 	 * nvme_ctrlr is removed.
2664 	 */
2665 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2666 	CU_ASSERT(rc == 0);
2667 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2668 
2669 	poll_threads();
2670 	spdk_delay_us(1000);
2671 	poll_threads();
2672 
2673 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2674 
2675 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2676 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2677 
2678 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2679 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2680 	CU_ASSERT(rc == 0);
2681 
2682 	spdk_delay_us(1000);
2683 	poll_threads();
2684 
2685 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2686 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2687 
2688 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2689 
2690 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2691 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2692 
2693 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2694 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2695 	CU_ASSERT(rc == 0);
2696 
2697 	spdk_delay_us(1000);
2698 	poll_threads();
2699 
2700 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2701 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2702 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2703 			break;
2704 		}
2705 	}
2706 	CU_ASSERT(ctrid != NULL);
2707 
2708 	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2709 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2710 	CU_ASSERT(rc == 0);
2711 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2712 
2713 	poll_threads();
2714 	spdk_delay_us(1000);
2715 	poll_threads();
2716 
2717 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2718 }
2719 
2720 static void
2721 test_abort(void)
2722 {
2723 	struct spdk_nvme_transport_id trid = {};
2724 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
2725 	struct spdk_nvme_ctrlr *ctrlr;
2726 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
2727 	struct nvme_ctrlr *nvme_ctrlr;
2728 	const int STRING_SIZE = 32;
2729 	const char *attached_names[STRING_SIZE];
2730 	struct nvme_bdev *bdev;
2731 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2732 	struct spdk_io_channel *ch1, *ch2;
2733 	struct nvme_bdev_channel *nbdev_ch1;
2734 	struct nvme_io_path *io_path1;
2735 	struct nvme_qpair *nvme_qpair1;
2736 	int rc;
2737 
2738 	/* Create a ctrlr on thread 1, and submit the I/O and admin requests to be
2739 	 * aborted on thread 0. Abort requests for I/O are submitted on thread 0, and
2740 	 * abort requests for admin commands are submitted on thread 1. Both should succeed.
2741 	 */
2742 
2743 	ut_init_trid(&trid);
2744 
2745 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2746 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2747 
2748 	g_ut_attach_ctrlr_status = 0;
2749 	g_ut_attach_bdev_count = 1;
2750 
2751 	set_thread(1);
2752 
2753 	opts.ctrlr_loss_timeout_sec = -1;
2754 	opts.reconnect_delay_sec = 1;
2755 	opts.multipath = false;
2756 
2757 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2758 				   attach_ctrlr_done, NULL, &dopts, &opts);
2759 	CU_ASSERT(rc == 0);
2760 
2761 	spdk_delay_us(1000);
2762 	poll_threads();
2763 
2764 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2765 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2766 
2767 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2768 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2769 
2770 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2771 	ut_bdev_io_set_buf(write_io);
2772 
2773 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2774 	ut_bdev_io_set_buf(fuse_io);
2775 
2776 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2777 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2778 
2779 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2780 
2781 	set_thread(0);
2782 
2783 	ch1 = spdk_get_io_channel(bdev);
2784 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2785 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2786 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2787 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2788 	nvme_qpair1 = io_path1->qpair;
2789 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2790 
2791 	set_thread(1);
2792 
2793 	ch2 = spdk_get_io_channel(bdev);
2794 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2795 
2796 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2797 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2798 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2799 
2800 	/* Aborting the already completed request should fail. */
2801 	write_io->internal.f.in_submit_request = true;
2802 	bdev_nvme_submit_request(ch1, write_io);
2803 	poll_threads();
2804 
2805 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2806 
2807 	abort_io->u.abort.bio_to_abort = write_io;
2808 	abort_io->internal.f.in_submit_request = true;
2809 
2810 	bdev_nvme_submit_request(ch1, abort_io);
2811 
2812 	poll_threads();
2813 
2814 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2815 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2816 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2817 
2818 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2819 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2820 
2821 	admin_io->internal.f.in_submit_request = true;
2822 	bdev_nvme_submit_request(ch1, admin_io);
2823 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2824 	poll_threads();
2825 
2826 	CU_ASSERT(admin_io->internal.f.in_submit_request == false);
2827 
2828 	abort_io->u.abort.bio_to_abort = admin_io;
2829 	abort_io->internal.f.in_submit_request = true;
2830 
2831 	bdev_nvme_submit_request(ch2, abort_io);
2832 
2833 	poll_threads();
2834 
2835 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2836 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2837 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2838 
2839 	/* Aborting the write request should succeed. */
2840 	write_io->internal.f.in_submit_request = true;
2841 	bdev_nvme_submit_request(ch1, write_io);
2842 
2843 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
2844 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2845 
2846 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2847 	abort_io->u.abort.bio_to_abort = write_io;
2848 	abort_io->internal.f.in_submit_request = true;
2849 
2850 	bdev_nvme_submit_request(ch1, abort_io);
2851 
2852 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2853 	poll_threads();
2854 
2855 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2856 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2857 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2858 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2859 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2860 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2861 
2862 	/* Aborting the fuse request should succeed. */
2863 	fuse_io->internal.f.in_submit_request = true;
2864 	bdev_nvme_submit_request(ch1, fuse_io);
2865 
2866 	CU_ASSERT(fuse_io->internal.f.in_submit_request == true);
2867 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2868 
2869 	abort_io->u.abort.bio_to_abort = fuse_io;
2870 	abort_io->internal.f.in_submit_request = true;
2871 
2872 	bdev_nvme_submit_request(ch1, abort_io);
2873 
2874 	spdk_delay_us(10000);
2875 	poll_threads();
2876 
2877 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2878 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2879 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2880 	CU_ASSERT(fuse_io->internal.f.in_submit_request == false);
2881 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2882 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2883 
2884 	/* Aborting the admin request should succeed. */
2885 	admin_io->internal.f.in_submit_request = true;
2886 	bdev_nvme_submit_request(ch1, admin_io);
2887 
2888 	CU_ASSERT(admin_io->internal.f.in_submit_request == true);
2889 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2890 
2891 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2892 	abort_io->u.abort.bio_to_abort = admin_io;
2893 	abort_io->internal.f.in_submit_request = true;
2894 
2895 	bdev_nvme_submit_request(ch2, abort_io);
2896 
2897 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2898 	poll_threads();
2899 
2900 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2901 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2902 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2903 	CU_ASSERT(admin_io->internal.f.in_submit_request == false);
2904 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2905 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2906 
2907 	set_thread(0);
2908 
2909 	/* If qpair is disconnected, it is freed and then reconnected via resetting
2910 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
2911 	 * while resetting the nvme_ctrlr.
2912 	 */
2913 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2914 
2915 	poll_thread_times(0, 3);
2916 
2917 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2918 	CU_ASSERT(nvme_ctrlr->resetting == true);
2919 
2920 	write_io->internal.f.in_submit_request = true;
2921 
2922 	bdev_nvme_submit_request(ch1, write_io);
2923 
2924 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
2925 	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
2926 
2927 	/* Aborting the queued write request should succeed immediately. */
2928 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2929 	abort_io->u.abort.bio_to_abort = write_io;
2930 	abort_io->internal.f.in_submit_request = true;
2931 
2932 	bdev_nvme_submit_request(ch1, abort_io);
2933 
2934 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2935 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2936 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2937 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2938 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2939 
2940 	poll_threads();
2941 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2942 	poll_threads();
2943 
2944 	spdk_put_io_channel(ch1);
2945 
2946 	set_thread(1);
2947 
2948 	spdk_put_io_channel(ch2);
2949 
2950 	poll_threads();
2951 
2952 	free(write_io);
2953 	free(fuse_io);
2954 	free(admin_io);
2955 	free(abort_io);
2956 
2957 	set_thread(1);
2958 
2959 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2960 	CU_ASSERT(rc == 0);
2961 
2962 	poll_threads();
2963 	spdk_delay_us(1000);
2964 	poll_threads();
2965 
2966 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2967 }
2968 
2969 static void
2970 test_get_io_qpair(void)
2971 {
2972 	struct spdk_nvme_transport_id trid = {};
2973 	struct spdk_nvme_ctrlr ctrlr = {};
2974 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2975 	struct spdk_io_channel *ch;
2976 	struct nvme_ctrlr_channel *ctrlr_ch;
2977 	struct spdk_nvme_qpair *qpair;
2978 	int rc;
2979 
2980 	ut_init_trid(&trid);
2981 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2982 
2983 	set_thread(0);
2984 
2985 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2986 	CU_ASSERT(rc == 0);
2987 
2988 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2989 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2990 
2991 	ch = spdk_get_io_channel(nvme_ctrlr);
2992 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2993 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2994 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2995 
2996 	qpair = bdev_nvme_get_io_qpair(ch);
2997 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2998 
2999 	spdk_put_io_channel(ch);
3000 
3001 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3002 	CU_ASSERT(rc == 0);
3003 
3004 	poll_threads();
3005 	spdk_delay_us(1000);
3006 	poll_threads();
3007 
3008 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3009 }
3010 
3011 /* Test a scenario in which the bdev subsystem starts shutting down while NVMe
3012  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first.
3013  * Add a test case to avoid regression for this scenario. spdk_bdev_unregister()
3014  * calls bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly here.
3015  */
3016 static void
3017 test_bdev_unregister(void)
3018 {
3019 	struct spdk_nvme_transport_id trid = {};
3020 	struct spdk_nvme_ctrlr *ctrlr;
3021 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3022 	struct nvme_ctrlr *nvme_ctrlr;
3023 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3024 	const int STRING_SIZE = 32;
3025 	const char *attached_names[STRING_SIZE];
3026 	struct nvme_bdev *bdev1, *bdev2;
3027 	int rc;
3028 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3029 
3030 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3031 	bdev_opts.multipath = false;
3032 
3033 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3034 	ut_init_trid(&trid);
3035 
3036 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
3037 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3038 
3039 	g_ut_attach_ctrlr_status = 0;
3040 	g_ut_attach_bdev_count = 2;
3041 
3042 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3043 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3044 	CU_ASSERT(rc == 0);
3045 
3046 	spdk_delay_us(1000);
3047 	poll_threads();
3048 
3049 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3050 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3051 
3052 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
3053 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3054 
3055 	bdev1 = nvme_ns1->bdev;
3056 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3057 
3058 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
3059 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3060 
3061 	bdev2 = nvme_ns2->bdev;
3062 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3063 
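	/* Destruct both bdevs directly, mimicking what spdk_bdev_unregister()
	 * does at shutdown.
	 */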
3064 	bdev_nvme_destruct(&bdev1->disk);
3065 	bdev_nvme_destruct(&bdev2->disk);
3066 
3067 	poll_threads();
3068 
3069 	CU_ASSERT(nvme_ns1->bdev == NULL);
3070 	CU_ASSERT(nvme_ns2->bdev == NULL);
3071 
3072 	nvme_ctrlr->destruct = true;
3073 	_nvme_ctrlr_destruct(nvme_ctrlr);
3074 
3075 	poll_threads();
3076 	spdk_delay_us(1000);
3077 	poll_threads();
3078 
3079 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3080 }
3081 
3082 static void
3083 test_compare_ns(void)
3084 {
3085 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
3086 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
3087 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
3088 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
3089 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
3090 
3091 	/* No IDs are defined. */
3092 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3093 
3094 	/* Only EUI64 are defined and not matched. */
3095 	nsdata1.eui64 = 0xABCDEF0123456789;
3096 	nsdata2.eui64 = 0xBBCDEF0123456789;
3097 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3098 
3099 	/* Only EUI64 are defined and matched. */
3100 	nsdata2.eui64 = 0xABCDEF0123456789;
3101 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3102 
3103 	/* Only NGUID are defined and not matched. */
3104 	nsdata1.eui64 = 0x0;
3105 	nsdata2.eui64 = 0x0;
3106 	nsdata1.nguid[0] = 0x12;
3107 	nsdata2.nguid[0] = 0x10;
3108 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3109 
3110 	/* Only NGUID are defined and matched. */
3111 	nsdata2.nguid[0] = 0x12;
3112 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3113 
3114 	/* Only UUID are defined and not matched. */
3115 	nsdata1.nguid[0] = 0x0;
3116 	nsdata2.nguid[0] = 0x0;
3117 	ns1.uuid = &uuid1;
3118 	ns2.uuid = &uuid2;
3119 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3120 
3121 	/* Only one UUID is defined. */
3122 	ns1.uuid = NULL;
3123 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3124 
3125 	/* Only UUID are defined and matched. */
3126 	ns1.uuid = &uuid2;
3127 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3128 
3129 	/* All EUI64, NGUID, and UUID are defined and matched. */
3130 	nsdata1.eui64 = 0x123456789ABCDEF;
3131 	nsdata2.eui64 = 0x123456789ABCDEF;
3132 	nsdata1.nguid[15] = 0x34;
3133 	nsdata2.nguid[15] = 0x34;
3134 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3135 
3136 	/* CSI are not matched. */
3137 	ns1.csi = SPDK_NVME_CSI_ZNS;
3138 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3139 }
3140 
3141 static void
3142 test_init_ana_log_page(void)
3143 {
3144 	struct spdk_nvme_transport_id trid = {};
3145 	struct spdk_nvme_ctrlr *ctrlr;
3146 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3147 	struct nvme_ctrlr *nvme_ctrlr;
3148 	const int STRING_SIZE = 32;
3149 	const char *attached_names[STRING_SIZE];
3150 	int rc;
3151 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3152 
3153 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3154 	bdev_opts.multipath = false;
3155 
3156 	set_thread(0);
3157 
3158 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3159 	ut_init_trid(&trid);
3160 
3161 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
3162 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3163 
3164 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3165 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3166 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3167 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3168 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3169 
3170 	g_ut_attach_ctrlr_status = 0;
3171 	g_ut_attach_bdev_count = 5;
3172 
3173 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3174 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3175 	CU_ASSERT(rc == 0);
3176 
3177 	spdk_delay_us(1000);
3178 	poll_threads();
3179 
3180 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3181 	poll_threads();
3182 
3183 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3184 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3185 
3186 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
3187 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
3188 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
3189 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
3190 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
3191 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3192 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3193 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3194 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
3195 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
3196 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
3197 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
3198 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
3199 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
3200 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
3201 
3202 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3203 	CU_ASSERT(rc == 0);
3204 
3205 	poll_threads();
3206 	spdk_delay_us(1000);
3207 	poll_threads();
3208 
3209 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3210 }
3211 
3212 static void
3213 init_accel(void)
3214 {
3215 	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3216 				sizeof(int), "accel_p");
3217 }
3218 
3219 static void
3220 fini_accel(void)
3221 {
3222 	spdk_io_device_unregister(g_accel_p, NULL);
3223 }
3224 
3225 static void
3226 test_get_memory_domains(void)
3227 {
3228 	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3229 	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3230 	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3231 	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3232 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3233 	struct spdk_memory_domain *domains[4] = {};
3234 	int rc = 0;
3235 
3236 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3237 
3238 	/* nvme controller doesn't have memory domains */
3239 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3240 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3241 	CU_ASSERT(rc == 0);
3242 	CU_ASSERT(domains[0] == NULL);
3243 	CU_ASSERT(domains[1] == NULL);
3244 
3245 	/* nvme controller has a memory domain */
3246 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3247 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3248 	CU_ASSERT(rc == 1);
3249 	CU_ASSERT(domains[0] != NULL);
3250 	memset(domains, 0, sizeof(domains));
3251 
3252 	/* multipath, 2 controllers report 1 memory domain each */
3253 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3254 
3255 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3256 	CU_ASSERT(rc == 2);
3257 	CU_ASSERT(domains[0] != NULL);
3258 	CU_ASSERT(domains[1] != NULL);
3259 	memset(domains, 0, sizeof(domains));
3260 
3261 	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3262 	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3263 	CU_ASSERT(rc == 2);
3264 
3265 	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3266 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3267 	CU_ASSERT(rc == 2);
3268 	CU_ASSERT(domains[0] == NULL);
3269 	CU_ASSERT(domains[1] == NULL);
3270 
3271 	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3272 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3273 	CU_ASSERT(rc == 2);
3274 	CU_ASSERT(domains[0] != NULL);
3275 	CU_ASSERT(domains[1] == NULL);
3276 	memset(domains, 0, sizeof(domains));
3277 
3278 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3279 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3280 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3281 	CU_ASSERT(rc == 4);
3282 	CU_ASSERT(domains[0] != NULL);
3283 	CU_ASSERT(domains[1] != NULL);
3284 	CU_ASSERT(domains[2] != NULL);
3285 	CU_ASSERT(domains[3] != NULL);
3286 	memset(domains, 0, sizeof(domains));
3287 
3288 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for
3289 	 * test). The array size is less than the number of memory domains. */
3290 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3291 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3292 	CU_ASSERT(rc == 4);
3293 	CU_ASSERT(domains[0] != NULL);
3294 	CU_ASSERT(domains[1] != NULL);
3295 	CU_ASSERT(domains[2] != NULL);
3296 	CU_ASSERT(domains[3] == NULL);
3297 	memset(domains, 0, sizeof(domains));
3298 
3299 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3300 }
3301 
3302 static void
3303 test_reconnect_qpair(void)
3304 {
3305 	struct spdk_nvme_transport_id trid = {};
3306 	struct spdk_nvme_ctrlr *ctrlr;
3307 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3308 	struct nvme_ctrlr *nvme_ctrlr;
3309 	const int STRING_SIZE = 32;
3310 	const char *attached_names[STRING_SIZE];
3311 	struct nvme_bdev *bdev;
3312 	struct spdk_io_channel *ch1, *ch2;
3313 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3314 	struct nvme_io_path *io_path1, *io_path2;
3315 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3316 	int rc;
3317 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3318 
3319 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3320 	bdev_opts.multipath = false;
3321 
3322 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3323 	ut_init_trid(&trid);
3324 
3325 	set_thread(0);
3326 
3327 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3328 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3329 
3330 	g_ut_attach_ctrlr_status = 0;
3331 	g_ut_attach_bdev_count = 1;
3332 
3333 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3334 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3335 	CU_ASSERT(rc == 0);
3336 
3337 	spdk_delay_us(1000);
3338 	poll_threads();
3339 
3340 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3341 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3342 
3343 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3344 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3345 
3346 	ch1 = spdk_get_io_channel(bdev);
3347 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3348 
3349 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3350 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3351 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3352 	nvme_qpair1 = io_path1->qpair;
3353 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3354 
3355 	set_thread(1);
3356 
3357 	ch2 = spdk_get_io_channel(bdev);
3358 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3359 
3360 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3361 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3362 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3363 	nvme_qpair2 = io_path2->qpair;
3364 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3365 
3366 	/* If a qpair is disconnected, it is freed and then reconnected via
3367 	 * resetting the corresponding nvme_ctrlr.
3368 	 */
3369 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3370 	ctrlr->is_failed = true;
3371 
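	/* Step the pollers a few iterations at a time to follow each stage of the
	 * reset: the I/O qpairs are freed first, then the admin qpair is
	 * reconnected, and finally the I/O qpairs are recreated.
	 */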
3372 	poll_thread_times(1, 3);
3373 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3374 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3375 	CU_ASSERT(nvme_ctrlr->resetting == true);
3376 
3377 	poll_thread_times(0, 3);
3378 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3379 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3380 	CU_ASSERT(ctrlr->is_failed == true);
3381 
3382 	poll_thread_times(1, 2);
3383 	poll_thread_times(0, 1);
3384 	CU_ASSERT(ctrlr->is_failed == false);
3385 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3386 
3387 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3388 	poll_thread_times(0, 2);
3389 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3390 
3391 	poll_thread_times(0, 1);
3392 	poll_thread_times(1, 1);
3393 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3394 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3395 	CU_ASSERT(nvme_ctrlr->resetting == true);
3396 
3397 	poll_thread_times(0, 2);
3398 	poll_thread_times(1, 1);
3399 	poll_thread_times(0, 1);
3400 	CU_ASSERT(nvme_ctrlr->resetting == false);
3401 
3402 	poll_threads();
3403 
3404 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3405 	 * fails, the qpair is just freed.
3406 	 */
3407 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3408 	ctrlr->is_failed = true;
3409 	ctrlr->fail_reset = true;
3410 
3411 	poll_thread_times(1, 3);
3412 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3413 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3414 	CU_ASSERT(nvme_ctrlr->resetting == true);
3415 
3416 	poll_thread_times(0, 3);
3417 	poll_thread_times(1, 1);
3418 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3419 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3420 	CU_ASSERT(ctrlr->is_failed == true);
3421 
3422 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3423 	poll_thread_times(0, 3);
3424 	poll_thread_times(1, 1);
3425 	poll_thread_times(0, 1);
3426 	CU_ASSERT(ctrlr->is_failed == true);
3427 	CU_ASSERT(nvme_ctrlr->resetting == false);
3428 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3429 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3430 
3431 	poll_threads();
3432 
3433 	spdk_put_io_channel(ch2);
3434 
3435 	set_thread(0);
3436 
3437 	spdk_put_io_channel(ch1);
3438 
3439 	poll_threads();
3440 
3441 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3442 	CU_ASSERT(rc == 0);
3443 
3444 	poll_threads();
3445 	spdk_delay_us(1000);
3446 	poll_threads();
3447 
3448 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3449 }
3450 
3451 static void
3452 test_create_bdev_ctrlr(void)
3453 {
3454 	struct nvme_path_id path1 = {}, path2 = {};
3455 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3456 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3457 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3458 	const int STRING_SIZE = 32;
3459 	const char *attached_names[STRING_SIZE];
3460 	int rc;
3461 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3462 
3463 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3464 	bdev_opts.multipath = true;
3465 
3466 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3467 	ut_init_trid(&path1.trid);
3468 	ut_init_trid2(&path2.trid);
3469 
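	/* The ctrlrs are attached with zero namespaces, so ctrlr entries are created but no bdevs. */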
3470 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3471 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3472 
3473 	g_ut_attach_ctrlr_status = 0;
3474 	g_ut_attach_bdev_count = 0;
3475 
3476 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3477 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3478 	CU_ASSERT(rc == 0);
3479 
3480 	spdk_delay_us(1000);
3481 	poll_threads();
3482 
3483 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3484 	poll_threads();
3485 
3486 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3487 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3488 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3489 
3490 	/* The cntlid is duplicated, so adding the second ctrlr should fail. */
3491 	g_ut_attach_ctrlr_status = -EINVAL;
3492 
3493 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3494 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3495 
3496 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3497 
3498 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3499 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3500 	CU_ASSERT(rc == 0);
3501 
3502 	spdk_delay_us(1000);
3503 	poll_threads();
3504 
3505 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3506 	poll_threads();
3507 
3508 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3509 
3510 	/* The cntlid is not duplicated, so adding the third ctrlr should succeed. */
3511 	g_ut_attach_ctrlr_status = 0;
3512 
3513 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3514 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3515 
3516 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3517 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3518 	CU_ASSERT(rc == 0);
3519 
3520 	spdk_delay_us(1000);
3521 	poll_threads();
3522 
3523 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3524 	poll_threads();
3525 
3526 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3527 
3528 	/* Delete two ctrlrs at once. */
3529 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3530 	CU_ASSERT(rc == 0);
3531 
3532 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3533 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3534 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3535 
3536 	poll_threads();
3537 	spdk_delay_us(1000);
3538 	poll_threads();
3539 
3540 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3541 
3542 	/* Add two ctrlrs and delete them one by one. */
3543 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3544 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3545 
3546 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3547 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3548 
3549 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3550 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3551 	CU_ASSERT(rc == 0);
3552 
3553 	spdk_delay_us(1000);
3554 	poll_threads();
3555 
3556 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3557 	poll_threads();
3558 
3559 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3560 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3561 	CU_ASSERT(rc == 0);
3562 
3563 	spdk_delay_us(1000);
3564 	poll_threads();
3565 
3566 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3567 	poll_threads();
3568 
3569 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3570 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3571 
3572 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3573 	CU_ASSERT(rc == 0);
3574 
3575 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3576 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3577 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3578 
3579 	poll_threads();
3580 	spdk_delay_us(1000);
3581 	poll_threads();
3582 
3583 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3584 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3585 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3586 
3587 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3588 	CU_ASSERT(rc == 0);
3589 
3590 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3591 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3592 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3593 
3594 	poll_threads();
3595 	spdk_delay_us(1000);
3596 	poll_threads();
3597 
3598 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3599 }
3600 
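/* Return the nvme_ns of the given nvme_bdev that belongs to the specified
 * nvme_ctrlr, or NULL if the ctrlr does not provide a namespace for this bdev.
 */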
3601 static struct nvme_ns *
3602 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3603 {
3604 	struct nvme_ns *nvme_ns;
3605 
3606 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3607 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3608 			return nvme_ns;
3609 		}
3610 	}
3611 
3612 	return NULL;
3613 }
3614 
3615 static void
3616 test_add_multi_ns_to_bdev(void)
3617 {
3618 	struct nvme_path_id path1 = {}, path2 = {};
3619 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3620 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3621 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3622 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3623 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3624 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3625 	const int STRING_SIZE = 32;
3626 	const char *attached_names[STRING_SIZE];
3627 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3628 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3629 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3630 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3631 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3632 	int rc;
3633 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3634 
3635 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3636 	bdev_opts.multipath = true;
3637 
3638 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3639 	ut_init_trid(&path1.trid);
3640 	ut_init_trid2(&path2.trid);
3641 
3642 	/* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */
3643 
3644 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st, 3rd,
3645 	 * and 4th namespaces are populated.
3646 	 */
3647 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3648 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3649 
3650 	ctrlr1->ns[1].is_active = false;
3651 	ctrlr1->ns[4].is_active = false;
3652 	ctrlr1->ns[0].uuid = &uuid1;
3653 	ctrlr1->ns[2].uuid = &uuid3;
3654 	ctrlr1->ns[3].uuid = &uuid4;
3655 
3656 	g_ut_attach_ctrlr_status = 0;
3657 	g_ut_attach_bdev_count = 3;
3658 
3659 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3660 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3661 	CU_ASSERT(rc == 0);
3662 
3663 	spdk_delay_us(1000);
3664 	poll_threads();
3665 
3666 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3667 	poll_threads();
3668 
3669 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st, 2nd,
3670 	 * and 4th namespaces are populated. The uuid of the 4th namespace differs, and
3671 	 * hence adding the 4th namespace to a bdev should fail.
3672 	 */
3673 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3674 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3675 
3676 	ctrlr2->ns[2].is_active = false;
3677 	ctrlr2->ns[4].is_active = false;
3678 	ctrlr2->ns[0].uuid = &uuid1;
3679 	ctrlr2->ns[1].uuid = &uuid2;
3680 	ctrlr2->ns[3].uuid = &uuid44;
3681 
3682 	g_ut_attach_ctrlr_status = 0;
3683 	g_ut_attach_bdev_count = 2;
3684 
3685 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3686 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3687 	CU_ASSERT(rc == 0);
3688 
3689 	spdk_delay_us(1000);
3690 	poll_threads();
3691 
3692 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3693 	poll_threads();
3694 
3695 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3696 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3697 
3698 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3699 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3700 
3701 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3702 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3703 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3704 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3705 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3706 
3707 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3708 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3709 
3710 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3711 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3712 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3713 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3714 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3715 
3716 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3717 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3718 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3719 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3720 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3721 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3722 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3723 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3724 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3725 
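	/* bdev1's namespace is shared by both ctrlrs, so its ref count is 2.
	 * The other bdevs are backed by a single namespace each.
	 */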
3726 	CU_ASSERT(bdev1->ref == 2);
3727 	CU_ASSERT(bdev2->ref == 1);
3728 	CU_ASSERT(bdev3->ref == 1);
3729 	CU_ASSERT(bdev4->ref == 1);
3730 
3731 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3732 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3733 	CU_ASSERT(rc == 0);
3734 
3735 	poll_threads();
3736 	spdk_delay_us(1000);
3737 	poll_threads();
3738 
3739 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3740 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3741 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2);
3742 
3743 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3744 	CU_ASSERT(rc == 0);
3745 
3746 	poll_threads();
3747 	spdk_delay_us(1000);
3748 	poll_threads();
3749 
3750 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3751 
3752 	/* Test if an nvme_bdev which has a namespace shared between two ctrlrs
3753 	 * can be deleted when the bdev subsystem shuts down.
3754 	 */
3755 	g_ut_attach_bdev_count = 1;
3756 
3757 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3758 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3759 
3760 	ctrlr1->ns[0].uuid = &uuid1;
3761 
3762 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3763 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3764 	CU_ASSERT(rc == 0);
3765 
3766 	spdk_delay_us(1000);
3767 	poll_threads();
3768 
3769 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3770 	poll_threads();
3771 
3772 	ut_init_trid2(&path2.trid);
3773 
3774 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3775 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3776 
3777 	ctrlr2->ns[0].uuid = &uuid1;
3778 
3779 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3780 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3781 	CU_ASSERT(rc == 0);
3782 
3783 	spdk_delay_us(1000);
3784 	poll_threads();
3785 
3786 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3787 	poll_threads();
3788 
3789 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3790 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3791 
3792 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3793 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3794 
3795 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3796 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3797 
3798 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3799 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3800 
3801 	/* Check that the nvme_bdev has two nvme_ns, one from each ctrlr. */
3802 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3803 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3804 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3805 
3806 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3807 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3808 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3809 
3810 	/* Delete the nvme_bdev first, as is done when the bdev subsystem shuts down. */
3811 	bdev_nvme_destruct(&bdev1->disk);
3812 
3813 	poll_threads();
3814 
3815 	CU_ASSERT(nvme_ns1->bdev == NULL);
3816 	CU_ASSERT(nvme_ns2->bdev == NULL);
3817 
3818 	nvme_ctrlr1->destruct = true;
3819 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3820 
3821 	poll_threads();
3822 	spdk_delay_us(1000);
3823 	poll_threads();
3824 
3825 	nvme_ctrlr2->destruct = true;
3826 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3827 
3828 	poll_threads();
3829 	spdk_delay_us(1000);
3830 	poll_threads();
3831 
3832 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3833 }
3834 
3835 static void
3836 test_add_multi_io_paths_to_nbdev_ch(void)
3837 {
3838 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3839 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3840 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3841 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3842 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3843 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3844 	const int STRING_SIZE = 32;
3845 	const char *attached_names[STRING_SIZE];
3846 	struct nvme_bdev *bdev;
3847 	struct spdk_io_channel *ch;
3848 	struct nvme_bdev_channel *nbdev_ch;
3849 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3850 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3851 	int rc;
3852 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3853 
3854 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3855 	bdev_opts.multipath = true;
3856 
3857 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3858 	ut_init_trid(&path1.trid);
3859 	ut_init_trid2(&path2.trid);
3860 	ut_init_trid3(&path3.trid);
3861 	g_ut_attach_ctrlr_status = 0;
3862 	g_ut_attach_bdev_count = 1;
3863 
3864 	set_thread(1);
3865 
3866 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3867 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3868 
3869 	ctrlr1->ns[0].uuid = &uuid1;
3870 
3871 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3872 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3873 	CU_ASSERT(rc == 0);
3874 
3875 	spdk_delay_us(1000);
3876 	poll_threads();
3877 
3878 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3879 	poll_threads();
3880 
3881 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3882 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3883 
3884 	ctrlr2->ns[0].uuid = &uuid1;
3885 
3886 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3887 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3888 	CU_ASSERT(rc == 0);
3889 
3890 	spdk_delay_us(1000);
3891 	poll_threads();
3892 
3893 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3894 	poll_threads();
3895 
3896 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3897 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3898 
3899 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3900 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3901 
3902 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3903 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3904 
3905 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3906 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3907 
3908 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3909 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3910 
3911 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3912 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3913 
3914 	set_thread(0);
3915 
3916 	ch = spdk_get_io_channel(bdev);
3917 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3918 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3919 
3920 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3921 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3922 
3923 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3924 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3925 
3926 	set_thread(1);
3927 
3928 	/* Check if an I/O path is dynamically added to the nvme_bdev_channel. */
3929 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3930 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3931 
3932 	ctrlr3->ns[0].uuid = &uuid1;
3933 
3934 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3935 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3936 	CU_ASSERT(rc == 0);
3937 
3938 	spdk_delay_us(1000);
3939 	poll_threads();
3940 
3941 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3942 	poll_threads();
3943 
3944 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn);
3945 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3946 
3947 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3948 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3949 
3950 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3951 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3952 
3953 	/* Check if an I/O path is dynamically deleted from the nvme_bdev_channel. */
3954 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3955 	CU_ASSERT(rc == 0);
3956 
3957 	poll_threads();
3958 	spdk_delay_us(1000);
3959 	poll_threads();
3960 
3961 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1);
3962 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3963 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3);
3964 
3965 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3966 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3967 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3968 
3969 	set_thread(0);
3970 
3971 	spdk_put_io_channel(ch);
3972 
3973 	poll_threads();
3974 
3975 	set_thread(1);
3976 
3977 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3978 	CU_ASSERT(rc == 0);
3979 
3980 	poll_threads();
3981 	spdk_delay_us(1000);
3982 	poll_threads();
3983 
3984 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3985 }
3986 
3987 static void
3988 test_admin_path(void)
3989 {
3990 	struct nvme_path_id path1 = {}, path2 = {};
3991 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3992 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3993 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3994 	const int STRING_SIZE = 32;
3995 	const char *attached_names[STRING_SIZE];
3996 	struct nvme_bdev *bdev;
3997 	struct spdk_io_channel *ch;
3998 	struct spdk_bdev_io *bdev_io;
3999 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4000 	int rc;
4001 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4002 
4003 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4004 	bdev_opts.multipath = true;
4005 
4006 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4007 	ut_init_trid(&path1.trid);
4008 	ut_init_trid2(&path2.trid);
4009 	g_ut_attach_ctrlr_status = 0;
4010 	g_ut_attach_bdev_count = 1;
4011 
4012 	set_thread(0);
4013 
4014 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4015 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4016 
4017 	ctrlr1->ns[0].uuid = &uuid1;
4018 
4019 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4020 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4021 	CU_ASSERT(rc == 0);
4022 
4023 	spdk_delay_us(1000);
4024 	poll_threads();
4025 
4026 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4027 	poll_threads();
4028 
4029 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4030 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4031 
4032 	ctrlr2->ns[0].uuid = &uuid1;
4033 
4034 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4035 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4036 	CU_ASSERT(rc == 0);
4037 
4038 	spdk_delay_us(1000);
4039 	poll_threads();
4040 
4041 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4042 	poll_threads();
4043 
4044 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4045 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4046 
4047 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4048 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4049 
4050 	ch = spdk_get_io_channel(bdev);
4051 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4052 
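	/* Build an admin passthrough request. GET FEATURES serves as a representative admin opcode. */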
4053 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
4054 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
4055 
4056 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
4057 	 * submitted to ctrlr2.
4058 	 */
4059 	ctrlr1->is_failed = true;
4060 	bdev_io->internal.f.in_submit_request = true;
4061 
4062 	bdev_nvme_submit_request(ch, bdev_io);
4063 
4064 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4065 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
4066 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4067 
4068 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4069 	poll_threads();
4070 
4071 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4072 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4073 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4074 
4075 	/* Both ctrlr1 and ctrlr2 are failed. The admin command fails to be submitted. */
4076 	ctrlr2->is_failed = true;
4077 	bdev_io->internal.f.in_submit_request = true;
4078 
4079 	bdev_nvme_submit_request(ch, bdev_io);
4080 
4081 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4082 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4083 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4084 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4085 
4086 	free(bdev_io);
4087 
4088 	spdk_put_io_channel(ch);
4089 
4090 	poll_threads();
4091 
4092 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4093 	CU_ASSERT(rc == 0);
4094 
4095 	poll_threads();
4096 	spdk_delay_us(1000);
4097 	poll_threads();
4098 
4099 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4100 }
4101 
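/* Return the io_path in the given nvme_bdev_channel whose qpair belongs to
 * the specified nvme_ctrlr, or NULL if none matches.
 */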
4102 static struct nvme_io_path *
4103 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
4104 			struct nvme_ctrlr *nvme_ctrlr)
4105 {
4106 	struct nvme_io_path *io_path;
4107 
4108 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
4109 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
4110 			return io_path;
4111 		}
4112 	}
4113 
4114 	return NULL;
4115 }
4116 
4117 static void
4118 test_reset_bdev_ctrlr(void)
4119 {
4120 	struct nvme_path_id path1 = {}, path2 = {};
4121 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4122 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4123 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4124 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4125 	struct nvme_path_id *curr_path1, *curr_path2;
4126 	const int STRING_SIZE = 32;
4127 	const char *attached_names[STRING_SIZE];
4128 	struct nvme_bdev *bdev;
4129 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
4130 	struct nvme_bdev_io *first_bio;
4131 	struct spdk_io_channel *ch1, *ch2;
4132 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
4133 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
4134 	int rc;
4135 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4136 
4137 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4138 	bdev_opts.multipath = true;
4139 
4140 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4141 	ut_init_trid(&path1.trid);
4142 	ut_init_trid2(&path2.trid);
4143 	g_ut_attach_ctrlr_status = 0;
4144 	g_ut_attach_bdev_count = 1;
4145 
4146 	set_thread(0);
4147 
4148 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4149 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4150 
4151 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4152 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4153 	CU_ASSERT(rc == 0);
4154 
4155 	spdk_delay_us(1000);
4156 	poll_threads();
4157 
4158 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4159 	poll_threads();
4160 
4161 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4162 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4163 
4164 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4165 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4166 	CU_ASSERT(rc == 0);
4167 
4168 	spdk_delay_us(1000);
4169 	poll_threads();
4170 
4171 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4172 	poll_threads();
4173 
4174 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4175 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4176 
4177 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4178 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
4179 
4180 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
4181 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
4182 
4183 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4184 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
4185 
4186 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
4187 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
4188 
4189 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4190 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4191 
4192 	set_thread(0);
4193 
4194 	ch1 = spdk_get_io_channel(bdev);
4195 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
4196 
4197 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
4198 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
4199 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
4200 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
4201 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
4202 
4203 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
4204 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
4205 
4206 	set_thread(1);
4207 
4208 	ch2 = spdk_get_io_channel(bdev);
4209 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
4210 
4211 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
4212 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
4213 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
4214 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
4215 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
4216 
4217 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
4218 
4219 	/* The first reset request from bdev_io is submitted on thread 0.
4220 	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
4221 	 *
4222 	 * A few extra polls are necessary after resetting ctrlr1 to check
4223 	 * pending reset requests for ctrlr1.
4224 	 */
4225 	ctrlr1->is_failed = true;
4226 	curr_path1->last_failed_tsc = spdk_get_ticks();
4227 	ctrlr2->is_failed = true;
4228 	curr_path2->last_failed_tsc = spdk_get_ticks();
4229 
4230 	set_thread(0);
4231 
4232 	bdev_nvme_submit_request(ch1, first_bdev_io);
4233 
4234 	poll_thread_times(0, 1);
4235 	poll_thread_times(1, 1);
4236 	poll_thread_times(0, 2);
4237 	poll_thread_times(1, 1);
4238 	poll_thread_times(0, 1);
4239 
4240 	CU_ASSERT(first_bio->io_path == io_path11);
4241 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4242 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4243 
4244 	poll_thread_times(0, 3);
4245 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4246 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4247 
4248 	poll_thread_times(1, 2);
4249 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4250 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4251 	CU_ASSERT(ctrlr1->is_failed == true);
4252 
4253 	poll_thread_times(0, 1);
4254 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4255 	CU_ASSERT(ctrlr1->is_failed == false);
4256 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4257 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4258 
4259 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4260 	poll_thread_times(0, 2);
4261 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4262 
4263 	poll_thread_times(0, 1);
4264 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4265 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4266 
4267 	poll_thread_times(1, 1);
4268 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4269 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4270 
4271 	poll_thread_times(0, 2);
4272 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4273 	poll_thread_times(1, 1);
4274 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4275 	poll_thread_times(0, 2);
4276 	CU_ASSERT(nvme_ctrlr1->resetting == false);
4277 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4278 	CU_ASSERT(first_bio->io_path == io_path12);
4279 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4280 
4281 	poll_thread_times(0, 3);
4282 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4283 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4284 
4285 	poll_thread_times(1, 2);
4286 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4287 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4288 	CU_ASSERT(ctrlr2->is_failed == true);
4289 
4290 	poll_thread_times(0, 1);
4291 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4292 	CU_ASSERT(ctrlr2->is_failed == false);
4293 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4294 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4295 
4296 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4297 	poll_thread_times(0, 2);
4298 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4299 
4300 	poll_thread_times(0, 1);
4301 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4302 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4303 
4304 	poll_thread_times(1, 2);
4305 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4306 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4307 
4308 	poll_thread_times(0, 2);
4309 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4310 	poll_thread_times(1, 1);
4311 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4312 	poll_thread_times(0, 2);
4313 	CU_ASSERT(first_bio->io_path == NULL);
4314 	CU_ASSERT(nvme_ctrlr2->resetting == false);
4315 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4316 
4317 	poll_threads();
4318 
4319 	/* There is a race between two reset requests from bdev_io.
4320 	 *
4321 	 * The first reset request is submitted on thread 0, and the second reset
4322 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4323 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
4324 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
4325 	 * The second is pending on ctrlr2 again. After the first completes resetting
4326 	 * ctrlr2, both complete successfully.
4327 	 */
4328 	ctrlr1->is_failed = true;
4329 	curr_path1->last_failed_tsc = spdk_get_ticks();
4330 	ctrlr2->is_failed = true;
4331 	curr_path2->last_failed_tsc = spdk_get_ticks();
4332 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4333 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4334 
4335 	set_thread(0);
4336 
4337 	bdev_nvme_submit_request(ch1, first_bdev_io);
4338 
4339 	set_thread(1);
4340 
4341 	bdev_nvme_submit_request(ch2, second_bdev_io);
4342 
4343 	poll_thread_times(0, 1);
4344 	poll_thread_times(1, 1);
4345 	poll_thread_times(0, 2);
4346 	poll_thread_times(1, 1);
4347 	poll_thread_times(0, 1);
4348 	poll_thread_times(1, 1);
4349 
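	/* At this point the first reset is being processed by ctrlr1 and the
	 * second is queued on ctrlr1's channel as a pending reset.
	 */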
4350 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4351 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4352 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4353 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4354 
4355 	poll_threads();
4356 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4357 	poll_threads();
4358 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4359 	poll_threads();
4360 
4361 	CU_ASSERT(ctrlr1->is_failed == false);
4362 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4363 	CU_ASSERT(ctrlr2->is_failed == false);
4364 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4365 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4366 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4367 
4368 	/* Reset of the first path succeeds, reset of the second path fails.
4369 	 * Since we have at least one working path, we should not fail the RESET IO.
4370 	 */
4371 	ctrlr1->is_failed = true;
4372 	curr_path1->last_failed_tsc = spdk_get_ticks();
4373 	ctrlr2->is_failed = true;
4374 	curr_path2->last_failed_tsc = spdk_get_ticks();
4375 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4376 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4377 
4378 	set_thread(0);
4379 	bdev_nvme_submit_request(ch1, first_bdev_io);
4380 
4381 	set_thread(1);
4382 	bdev_nvme_submit_request(ch2, second_bdev_io);
4383 
4384 	poll_thread_times(0, 1);
4385 	poll_thread_times(1, 1);
4386 	poll_thread_times(0, 2);
4387 	poll_thread_times(1, 1);
4388 	poll_thread_times(0, 1);
4389 	poll_thread_times(1, 1);
4390 
4391 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4392 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4393 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4394 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4395 
4396 	ctrlr2->fail_reset = true;
4397 
4398 	poll_threads();
4399 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4400 	poll_threads();
4401 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4402 	poll_threads();
4403 
4404 	CU_ASSERT(ctrlr1->is_failed == false);
4405 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4406 	CU_ASSERT(ctrlr2->is_failed == true);
4407 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4408 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4409 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4410 
4411 	/* Path 2 recovers */
4412 	ctrlr2->fail_reset = false;
4413 	poll_threads();
4414 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4415 	poll_threads();
4416 
4417 	CU_ASSERT(ctrlr2->is_failed == false);
4418 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4419 
4420 	/* Reset of the first path fails, reset of the second path succeeds.
4421 	 * Since we have at least one working path, we should not fail the RESET IO.
4422 	 */
4423 	ctrlr1->is_failed = true;
4424 	curr_path1->last_failed_tsc = spdk_get_ticks();
4425 	ctrlr2->is_failed = true;
4426 	curr_path2->last_failed_tsc = spdk_get_ticks();
4427 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4428 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4429 
4430 	set_thread(0);
4431 	bdev_nvme_submit_request(ch1, first_bdev_io);
4432 
4433 	set_thread(1);
4434 	bdev_nvme_submit_request(ch2, second_bdev_io);
4435 
4436 	poll_thread_times(0, 1);
4437 	poll_thread_times(1, 1);
4438 	poll_thread_times(0, 2);
4439 	poll_thread_times(1, 1);
4440 	poll_thread_times(0, 1);
4441 	poll_thread_times(1, 1);
4442 
4443 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4444 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4445 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4446 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4447 
4448 	ctrlr1->fail_reset = true;
4449 
4450 	poll_threads();
4451 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4452 	poll_threads();
4453 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4454 	poll_threads();
4455 
4456 	CU_ASSERT(ctrlr1->is_failed == true);
4457 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4458 	CU_ASSERT(ctrlr2->is_failed == false);
4459 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4460 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4461 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4462 
4463 	/* Path 1 recovers */
4464 	ctrlr1->fail_reset = false;
4465 	poll_threads();
4466 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4467 	poll_threads();
4468 
4469 	CU_ASSERT(ctrlr1->is_failed == false);
4470 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4471 
4472 	/* Reset of both paths fails.
4473 	 * Since we have no working paths, we should fail the RESET IO.
4474 	 */
4475 	ctrlr1->is_failed = true;
4476 	curr_path1->last_failed_tsc = spdk_get_ticks();
4477 	ctrlr2->is_failed = true;
4478 	curr_path2->last_failed_tsc = spdk_get_ticks();
4479 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4480 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4481 
4482 	set_thread(0);
4483 	bdev_nvme_submit_request(ch1, first_bdev_io);
4484 
4485 	set_thread(1);
4486 	bdev_nvme_submit_request(ch2, second_bdev_io);
4487 
4488 	poll_thread_times(0, 1);
4489 	poll_thread_times(1, 1);
4490 	poll_thread_times(0, 2);
4491 	poll_thread_times(1, 1);
4492 	poll_thread_times(0, 1);
4493 	poll_thread_times(1, 1);
4494 
4495 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4496 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4497 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4498 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4499 
4500 	ctrlr1->fail_reset = true;
4501 	ctrlr2->fail_reset = true;
4502 
4503 	poll_threads();
4504 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4505 	poll_threads();
4506 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4507 	poll_threads();
4508 
4509 	CU_ASSERT(ctrlr1->is_failed == true);
4510 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4511 	CU_ASSERT(ctrlr2->is_failed == true);
4512 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4513 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4514 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4515 
4516 	/* Paths 1 and 2 recover */
4517 	ctrlr1->fail_reset = false;
4518 	ctrlr2->fail_reset = false;
4519 	poll_threads();
4520 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4521 	poll_threads();
4522 
4523 	CU_ASSERT(ctrlr1->is_failed == false);
4524 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4525 	CU_ASSERT(ctrlr2->is_failed == false);
4526 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4527 
4528 	/* Reset of the first path fails, reset of the second path succeeds.
4529 	 * Since we have at least one working path, we should not fail the RESET IO.
4530 	 *
4531 	 * Here, reset of the first path fails immediately because it is disabled.
4532 	 *
4533 	 * The purpose is to verify the fix for a bug where bdev_io did not hold
4534 	 * the io_path when its reset failed immediately, and the continued
4535 	 * operation then caused a NULL pointer access.
4536 	 */
4537 	nvme_ctrlr1->disabled = true;
4538 	ctrlr1->is_failed = true;
4539 	curr_path1->last_failed_tsc = spdk_get_ticks();
4540 	ctrlr2->is_failed = true;
4541 	curr_path2->last_failed_tsc = spdk_get_ticks();
4542 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4543 
4544 	set_thread(0);
4545 	bdev_nvme_submit_request(ch1, first_bdev_io);
4546 
4547 	poll_threads();
4548 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4549 	poll_threads();
4550 
4551 	CU_ASSERT(ctrlr1->is_failed == true);
4552 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4553 	CU_ASSERT(ctrlr2->is_failed == false);
4554 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4555 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4556 
4557 	nvme_ctrlr1->disabled = false;
4558 	ctrlr1->is_failed = false;
4559 	curr_path1->last_failed_tsc = 0;
4560 
4561 	set_thread(0);
4562 
4563 	spdk_put_io_channel(ch1);
4564 
4565 	set_thread(1);
4566 
4567 	spdk_put_io_channel(ch2);
4568 
4569 	poll_threads();
4570 
4571 	set_thread(0);
4572 
4573 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4574 	CU_ASSERT(rc == 0);
4575 
4576 	poll_threads();
4577 	spdk_delay_us(1000);
4578 	poll_threads();
4579 
4580 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4581 
4582 	free(first_bdev_io);
4583 	free(second_bdev_io);
4584 }
4585 
4586 static void
4587 test_find_io_path(void)
4588 {
4589 	struct nvme_bdev_channel nbdev_ch = {
4590 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4591 	};
4592 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4593 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4594 	struct spdk_nvme_ns ns1 = {}, ns2 = {};
4595 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4596 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4597 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4598 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4599 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, };
4600 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4601 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4602 
4603 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4604 
4605 	/* Test that an io_path whose ANA state is not accessible is excluded. */
4606 
4607 	nvme_qpair1.qpair = &qpair1;
4608 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4609 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4610 
4611 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4612 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4613 
4614 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4615 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4616 
4617 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4618 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4619 
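	/* bdev_nvme_find_io_path() caches the selected path in current_io_path.
	 * Clear the cache so the next call re-evaluates the io_path list.
	 */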
4620 	nbdev_ch.current_io_path = NULL;
4621 
4622 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4623 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4624 
4625 	nbdev_ch.current_io_path = NULL;
4626 
4627 	/* Test that an io_path whose qpair is resetting is excluded. */
4628 
4629 	nvme_qpair1.qpair = NULL;
4630 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4631 
4632 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4633 
4634 	/* Test that the ANA optimized state, or otherwise the first found ANA
4635 	 * non-optimized state, is prioritized.
4636 	 */
4637 
4638 	nvme_qpair1.qpair = &qpair1;
4639 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4640 	nvme_qpair2.qpair = &qpair2;
4641 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4642 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4643 
4644 	nbdev_ch.current_io_path = NULL;
4645 
4646 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4647 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4648 
4649 	nbdev_ch.current_io_path = NULL;
4650 }
4651 
4652 static void
4653 test_retry_io_if_ana_state_is_updating(void)
4654 {
4655 	struct nvme_path_id path = {};
4656 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
4657 	struct spdk_nvme_ctrlr *ctrlr;
4658 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
4659 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4660 	struct nvme_ctrlr *nvme_ctrlr;
4661 	const int STRING_SIZE = 32;
4662 	const char *attached_names[STRING_SIZE];
4663 	struct nvme_bdev *bdev;
4664 	struct nvme_ns *nvme_ns;
4665 	struct spdk_bdev_io *bdev_io1;
4666 	struct spdk_io_channel *ch;
4667 	struct nvme_bdev_channel *nbdev_ch;
4668 	struct nvme_io_path *io_path;
4669 	struct nvme_qpair *nvme_qpair;
4670 	int rc;
4671 
4672 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4673 	ut_init_trid(&path.trid);
4674 
4675 	set_thread(0);
4676 
4677 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4678 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4679 
4680 	g_ut_attach_ctrlr_status = 0;
4681 	g_ut_attach_bdev_count = 1;
4682 
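	/* A negative ctrlr_loss_timeout_sec allows infinite reconnect retries,
	 * here with a 1 second delay between attempts.
	 */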
4683 	opts.ctrlr_loss_timeout_sec = -1;
4684 	opts.reconnect_delay_sec = 1;
4685 	opts.multipath = false;
4686 
4687 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4688 				   attach_ctrlr_done, NULL, &dopts, &opts);
4689 	CU_ASSERT(rc == 0);
4690 
4691 	spdk_delay_us(1000);
4692 	poll_threads();
4693 
4694 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4695 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4696 
4697 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
4698 	CU_ASSERT(nvme_ctrlr != NULL);
4699 
4700 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4701 	CU_ASSERT(bdev != NULL);
4702 
4703 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4704 	CU_ASSERT(nvme_ns != NULL);
4705 
4706 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4707 	ut_bdev_io_set_buf(bdev_io1);
4708 
4709 	ch = spdk_get_io_channel(bdev);
4710 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4711 
4712 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4713 
4714 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4715 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4716 
4717 	nvme_qpair = io_path->qpair;
4718 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4719 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4720 
4721 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4722 
4723 	/* If qpair is connected, I/O should succeed. */
4724 	bdev_io1->internal.f.in_submit_request = true;
4725 
4726 	bdev_nvme_submit_request(ch, bdev_io1);
4727 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4728 
4729 	poll_threads();
4730 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
4731 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4732 
4733 	/* If the ANA state of the namespace is inaccessible, the I/O should be queued. */
4734 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4735 	nbdev_ch->current_io_path = NULL;
4736 
4737 	bdev_io1->internal.f.in_submit_request = true;
4738 
4739 	bdev_nvme_submit_request(ch, bdev_io1);
4740 
4741 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4742 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4743 	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4744 
4745 	/* The ANA state became accessible while the I/O was queued. */
4746 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4747 
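	/* Advance the mock clock by 1 second so the queued retry is processed. */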
4748 	spdk_delay_us(1000000);
4749 
4750 	poll_thread_times(0, 1);
4751 
4752 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4753 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4754 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4755 
4756 	poll_threads();
4757 
4758 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4759 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
4760 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4761 
4762 	free(bdev_io1);
4763 
4764 	spdk_put_io_channel(ch);
4765 
4766 	poll_threads();
4767 
4768 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4769 	CU_ASSERT(rc == 0);
4770 
4771 	poll_threads();
4772 	spdk_delay_us(1000);
4773 	poll_threads();
4774 
4775 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4776 }
4777 
4778 static void
4779 test_retry_io_for_io_path_error(void)
4780 {
4781 	struct nvme_path_id path1 = {}, path2 = {};
4782 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4783 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4784 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4785 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4786 	const int STRING_SIZE = 32;
4787 	const char *attached_names[STRING_SIZE];
4788 	struct nvme_bdev *bdev;
4789 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4790 	struct spdk_bdev_io *bdev_io;
4791 	struct nvme_bdev_io *bio;
4792 	struct spdk_io_channel *ch;
4793 	struct nvme_bdev_channel *nbdev_ch;
4794 	struct nvme_io_path *io_path1, *io_path2;
4795 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4796 	struct ut_nvme_req *req;
4797 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4798 	int rc;
4799 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4800 
4801 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4802 	bdev_opts.multipath = true;
4803 
4804 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4805 	ut_init_trid(&path1.trid);
4806 	ut_init_trid2(&path2.trid);
4807 
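	/* Allow a single retry per failed I/O. */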
4808 	g_opts.bdev_retry_count = 1;
4809 
4810 	set_thread(0);
4811 
4812 	g_ut_attach_ctrlr_status = 0;
4813 	g_ut_attach_bdev_count = 1;
4814 
4815 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4816 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4817 
4818 	ctrlr1->ns[0].uuid = &uuid1;
4819 
4820 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4821 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4822 	CU_ASSERT(rc == 0);
4823 
4824 	spdk_delay_us(1000);
4825 	poll_threads();
4826 
4827 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4828 	poll_threads();
4829 
4830 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4831 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4832 
4833 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4834 	CU_ASSERT(nvme_ctrlr1 != NULL);
4835 
4836 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4837 	CU_ASSERT(bdev != NULL);
4838 
4839 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4840 	CU_ASSERT(nvme_ns1 != NULL);
4841 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4842 
4843 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4844 	ut_bdev_io_set_buf(bdev_io);
4845 
4846 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4847 
4848 	ch = spdk_get_io_channel(bdev);
4849 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4850 
4851 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4852 
4853 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4854 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4855 
4856 	nvme_qpair1 = io_path1->qpair;
4857 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4858 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4859 
4860 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4861 
4862 	/* The I/O gets a temporary I/O path error, but it should not be retried if DNR is set. */
4863 	bdev_io->internal.f.in_submit_request = true;
4864 
4865 	bdev_nvme_submit_request(ch, bdev_io);
4866 
4867 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4868 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4869 
4870 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4871 	SPDK_CU_ASSERT_FATAL(req != NULL);
4872 
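	/* Complete the outstanding request with a path error status that has DNR set. */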
4873 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4874 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4875 	req->cpl.status.dnr = 1;
4876 
4877 	poll_thread_times(0, 1);
4878 
4879 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4880 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4881 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4882 
4883 	/* The I/O gets a temporary I/O path error, but it should succeed after a retry. */
4884 	bdev_io->internal.f.in_submit_request = true;
4885 
4886 	bdev_nvme_submit_request(ch, bdev_io);
4887 
4888 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4889 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4890 
4891 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4892 	SPDK_CU_ASSERT_FATAL(req != NULL);
4893 
4894 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4895 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4896 
4897 	poll_thread_times(0, 1);
4898 
4899 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4900 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4901 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4902 
4903 	poll_threads();
4904 
4905 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4906 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4907 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4908 
4909 	/* Add io_path2 dynamically, and create a multipath configuration. */
4910 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4911 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4912 
4913 	ctrlr2->ns[0].uuid = &uuid1;
4914 
4915 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4916 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4917 	CU_ASSERT(rc == 0);
4918 
4919 	spdk_delay_us(1000);
4920 	poll_threads();
4921 
4922 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4923 	poll_threads();
4924 
4925 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4926 	CU_ASSERT(nvme_ctrlr2 != NULL);
4927 
4928 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4929 	CU_ASSERT(nvme_ns2 != NULL);
4930 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4931 
4932 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4933 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4934 
4935 	nvme_qpair2 = io_path2->qpair;
4936 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4937 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4938 
4939 	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4940 	 * and deleted. Hence the I/O is aborted. However, io_path2 is available,
4941 	 * so after a retry the I/O is submitted to io_path2 and should succeed.
4942 	 */
4943 	bdev_io->internal.f.in_submit_request = true;
4944 
4945 	bdev_nvme_submit_request(ch, bdev_io);
4946 
4947 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4948 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4949 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4950 
4951 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4952 	SPDK_CU_ASSERT_FATAL(req != NULL);
4953 
4954 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4955 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4956 
4957 	poll_thread_times(0, 1);
4958 
4959 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4960 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4961 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4962 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4963 
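	/* Free the disconnected qpair before the retry runs, mimicking qpair deletion. */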
4964 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4965 	nvme_qpair1->qpair = NULL;
4966 
4967 	poll_threads();
4968 
4969 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4970 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4971 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4972 
4973 	free(bdev_io);
4974 
4975 	spdk_put_io_channel(ch);
4976 
4977 	poll_threads();
4978 
4979 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4980 	CU_ASSERT(rc == 0);
4981 
4982 	poll_threads();
4983 	spdk_delay_us(1000);
4984 	poll_threads();
4985 
4986 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4987 
4988 	g_opts.bdev_retry_count = 0;
4989 }
4990 
4991 static void
4992 test_retry_io_count(void)
4993 {
4994 	struct nvme_path_id path = {};
4995 	struct spdk_nvme_ctrlr *ctrlr;
4996 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4997 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4998 	struct nvme_ctrlr *nvme_ctrlr;
4999 	const int STRING_SIZE = 32;
5000 	const char *attached_names[STRING_SIZE];
5001 	struct nvme_bdev *bdev;
5002 	struct nvme_ns *nvme_ns;
5003 	struct spdk_bdev_io *bdev_io;
5004 	struct nvme_bdev_io *bio;
5005 	struct spdk_io_channel *ch;
5006 	struct nvme_bdev_channel *nbdev_ch;
5007 	struct nvme_io_path *io_path;
5008 	struct nvme_qpair *nvme_qpair;
5009 	struct ut_nvme_req *req;
5010 	int rc;
5011 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5012 
5013 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5014 	bdev_opts.multipath = false;
5015 
5016 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5017 	ut_init_trid(&path.trid);
5018 
5019 	set_thread(0);
5020 
5021 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5022 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5023 
5024 	g_ut_attach_ctrlr_status = 0;
5025 	g_ut_attach_bdev_count = 1;
5026 
5027 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5028 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5029 	CU_ASSERT(rc == 0);
5030 
5031 	spdk_delay_us(1000);
5032 	poll_threads();
5033 
5034 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5035 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5036 
5037 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
5038 	CU_ASSERT(nvme_ctrlr != NULL);
5039 
5040 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5041 	CU_ASSERT(bdev != NULL);
5042 
5043 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5044 	CU_ASSERT(nvme_ns != NULL);
5045 
5046 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5047 	ut_bdev_io_set_buf(bdev_io);
5048 
5049 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5050 
5051 	ch = spdk_get_io_channel(bdev);
5052 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5053 
5054 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5055 
5056 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5057 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5058 
5059 	nvme_qpair = io_path->qpair;
5060 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5061 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5062 
5063 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5064 
5065 	/* If I/O is aborted by request, it should not be retried. */
5066 	g_opts.bdev_retry_count = 1;
5067 
5068 	bdev_io->internal.f.in_submit_request = true;
5069 
5070 	bdev_nvme_submit_request(ch, bdev_io);
5071 
5072 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5073 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5074 
5075 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5076 	SPDK_CU_ASSERT_FATAL(req != NULL);
5077 
5078 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
5079 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5080 
5081 	poll_thread_times(0, 1);
5082 
5083 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5084 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5085 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
5086 
5087 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
5088 	 * the failed I/O should not be retried.
5089 	 */
5090 	g_opts.bdev_retry_count = 4;
5091 
5092 	bdev_io->internal.f.in_submit_request = true;
5093 
5094 	bdev_nvme_submit_request(ch, bdev_io);
5095 
5096 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5097 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5098 
5099 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5100 	SPDK_CU_ASSERT_FATAL(req != NULL);
5101 
5102 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5103 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5104 	bio->retry_count = 4;
5105 
5106 	poll_thread_times(0, 1);
5107 
5108 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5109 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5110 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
5111 
5112 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
5113 	g_opts.bdev_retry_count = -1;
5114 
5115 	bdev_io->internal.f.in_submit_request = true;
5116 
5117 	bdev_nvme_submit_request(ch, bdev_io);
5118 
5119 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5120 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5121 
5122 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5123 	SPDK_CU_ASSERT_FATAL(req != NULL);
5124 
5125 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5126 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5127 	bio->retry_count = 4;
5128 
5129 	poll_thread_times(0, 1);
5130 
5131 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5132 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5133 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5134 
5135 	poll_threads();
5136 
5137 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5138 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5139 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5140 
5141 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
5142 	 * the failed I/O should be retried.
5143 	 */
5144 	g_opts.bdev_retry_count = 4;
5145 
5146 	bdev_io->internal.f.in_submit_request = true;
5147 
5148 	bdev_nvme_submit_request(ch, bdev_io);
5149 
5150 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5151 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5152 
5153 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5154 	SPDK_CU_ASSERT_FATAL(req != NULL);
5155 
5156 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5157 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5158 	bio->retry_count = 3;
5159 
5160 	poll_thread_times(0, 1);
5161 
5162 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5163 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5164 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5165 
5166 	poll_threads();
5167 
5168 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5169 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5170 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5171 
5172 	free(bdev_io);
5173 
5174 	spdk_put_io_channel(ch);
5175 
5176 	poll_threads();
5177 
5178 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5179 	CU_ASSERT(rc == 0);
5180 
5181 	poll_threads();
5182 	spdk_delay_us(1000);
5183 	poll_threads();
5184 
5185 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5186 
5187 	g_opts.bdev_retry_count = 0;
5188 }
5189 
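/* Verify that only one ANA log page read is in flight at a time: concurrent reads,
 * including those issued from another thread, are rejected, and reads are also
 * rejected while the ctrlr is resetting. Additionally, verify that parsing an ANA
 * log page which references inactive namespaces completes successfully.
 */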
5190 static void
5191 test_concurrent_read_ana_log_page(void)
5192 {
5193 	struct spdk_nvme_transport_id trid = {};
5194 	struct spdk_nvme_ctrlr *ctrlr;
5195 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5196 	struct nvme_ctrlr *nvme_ctrlr;
5197 	const int STRING_SIZE = 32;
5198 	const char *attached_names[STRING_SIZE];
5199 	int rc;
5200 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5201 
5202 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5203 	bdev_opts.multipath = false;
5204 
5205 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5206 	ut_init_trid(&trid);
5207 
5208 	set_thread(0);
5209 
5210 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
5211 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5212 
5213 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5214 
5215 	g_ut_attach_ctrlr_status = 0;
5216 	g_ut_attach_bdev_count = 1;
5217 
5218 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
5219 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5220 	CU_ASSERT(rc == 0);
5221 
5222 	spdk_delay_us(1000);
5223 	poll_threads();
5224 
5225 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5226 	poll_threads();
5227 
5228 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5229 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5230 
5231 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5232 
5233 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5234 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5235 
5236 	/* A subsequent read request should be rejected. */
5237 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5238 
5239 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5240 
5241 	set_thread(1);
5242 
5243 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5244 
5245 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5246 
5247 	/* A reset request issued while reading the ANA log page should not be rejected. */
5248 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5249 	CU_ASSERT(rc == 0);
5250 
5251 	poll_threads();
5252 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5253 	poll_threads();
5254 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5255 	poll_threads();
5256 
5257 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5258 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5259 
5260 	/* Reading the ANA log page while resetting the ctrlr should be rejected. */
5261 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5262 	CU_ASSERT(rc == 0);
5263 
5264 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5265 
5266 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5267 
5268 	poll_threads();
5269 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5270 	poll_threads();
5271 
5272 	set_thread(0);
5273 
5274 	/* It is possible that the target sends an ANA change for inactive namespaces.
5275 	 *
5276 	 * Previously, an assert() was added because this case was considered unlikely.
5277 	 * However, the assert() was hit in a real environment.
5278 	 *
5279 	 * Hence, remove the assert() and add this unit test case.
5280 	 *
5281 	 * Simulate this case by depopulating namespaces and then parsing ANA
5282 	 * log page created when all namespaces are active.
5283 	 * Then, check if parsing ANA log page completes successfully.
5284 	 */
5285 	nvme_ctrlr_depopulate_namespaces(nvme_ctrlr);
5286 
5287 	rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr);
5288 	CU_ASSERT(rc == 0);
5289 
5290 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5291 	CU_ASSERT(rc == 0);
5292 
5293 	poll_threads();
5294 	spdk_delay_us(1000);
5295 	poll_threads();
5296 
5297 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5298 }
5299 
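/* Verify that an I/O which fails with an ANA error is queued for retry while the
 * namespace's ANA state is updated, and succeeds once the namespace becomes
 * accessible again.
 */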
5300 static void
5301 test_retry_io_for_ana_error(void)
5302 {
5303 	struct nvme_path_id path = {};
5304 	struct spdk_nvme_ctrlr *ctrlr;
5305 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5306 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5307 	struct nvme_ctrlr *nvme_ctrlr;
5308 	const int STRING_SIZE = 32;
5309 	const char *attached_names[STRING_SIZE];
5310 	struct nvme_bdev *bdev;
5311 	struct nvme_ns *nvme_ns;
5312 	struct spdk_bdev_io *bdev_io;
5313 	struct nvme_bdev_io *bio;
5314 	struct spdk_io_channel *ch;
5315 	struct nvme_bdev_channel *nbdev_ch;
5316 	struct nvme_io_path *io_path;
5317 	struct nvme_qpair *nvme_qpair;
5318 	struct ut_nvme_req *req;
5319 	uint64_t now;
5320 	int rc;
5321 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5322 
5323 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5324 	bdev_opts.multipath = false;
5325 
5326 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5327 	ut_init_trid(&path.trid);
5328 
5329 	g_opts.bdev_retry_count = 1;
5330 
5331 	set_thread(0);
5332 
5333 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
5334 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5335 
5336 	g_ut_attach_ctrlr_status = 0;
5337 	g_ut_attach_bdev_count = 1;
5338 
5339 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5340 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5341 	CU_ASSERT(rc == 0);
5342 
5343 	spdk_delay_us(1000);
5344 	poll_threads();
5345 
5346 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5347 	poll_threads();
5348 
5349 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5350 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5351 
5352 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
5353 	CU_ASSERT(nvme_ctrlr != NULL);
5354 
5355 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5356 	CU_ASSERT(bdev != NULL);
5357 
5358 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5359 	CU_ASSERT(nvme_ns != NULL);
5360 
5361 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5362 	ut_bdev_io_set_buf(bdev_io);
5363 
5364 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5365 
5366 	ch = spdk_get_io_channel(bdev);
5367 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5368 
5369 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5370 
5371 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5372 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5373 
5374 	nvme_qpair = io_path->qpair;
5375 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5376 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5377 
5378 	now = spdk_get_ticks();
5379 
5380 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5381 
5382 	/* If an I/O gets an ANA error, it should be queued, the corresponding namespace
5383 	 * should be frozen, and its ANA state should be updated.
5384 	 */
5385 	bdev_io->internal.f.in_submit_request = true;
5386 
5387 	bdev_nvme_submit_request(ch, bdev_io);
5388 
5389 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5390 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5391 
5392 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5393 	SPDK_CU_ASSERT_FATAL(req != NULL);
5394 
5395 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5396 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
5397 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
5398 
5399 	poll_thread_times(0, 1);
5400 
5401 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5402 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5403 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5404 	/* I/O should be retried immediately. */
5405 	CU_ASSERT(bio->retry_ticks == now);
5406 	CU_ASSERT(nvme_ns->ana_state_updating == true);
5407 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5408 
5409 	poll_threads();
5410 
5411 	/* Namespace is inaccessible, and hence I/O should be queued again. */
5412 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5413 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5414 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5415 	/* If no I/O path was found but one may become available,
5416 	 * the I/O should be retried after a second.
5417 	 */
5418 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
5419 
5420 	/* The namespace should be unfrozen after its ANA state update completes. */
5421 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5422 	poll_threads();
5423 
5424 	CU_ASSERT(nvme_ns->ana_state_updating == false);
5425 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5426 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5427 
5428 	/* Retrying the queued I/O should succeed. */
5429 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
5430 	poll_threads();
5431 
5432 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5433 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5434 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5435 
5436 	free(bdev_io);
5437 
5438 	spdk_put_io_channel(ch);
5439 
5440 	poll_threads();
5441 
5442 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5443 	CU_ASSERT(rc == 0);
5444 
5445 	poll_threads();
5446 	spdk_delay_us(1000);
5447 	poll_threads();
5448 
5449 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5450 
5451 	g_opts.bdev_retry_count = 0;
5452 }
5453 
5454 static void
5455 test_check_io_error_resiliency_params(void)
5456 {
5457 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5458 	 * 3rd parameter is fast_io_fail_timeout_sec.
5459 	 */
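	/* As exercised by the assertions below, the expected validation rules are:
	 * - ctrlr_loss_timeout_sec is 0 (then both others must be 0), -1 (infinite
	 *   retries), or positive.
	 * - Otherwise, reconnect_delay_sec must be non-zero and must not exceed a
	 *   positive ctrlr_loss_timeout_sec.
	 * - If fast_io_fail_timeout_sec is non-zero, it must be at least
	 *   reconnect_delay_sec and must not exceed a positive ctrlr_loss_timeout_sec.
	 */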
5460 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
5461 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
5462 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
5463 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
5464 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
5465 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
5466 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
5467 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
5468 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
5469 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
5470 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
5471 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
5472 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
5473 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
5474 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
5475 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
5476 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
5477 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5478 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
5479 }
5480 
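/* Verify that I/Os submitted while the ctrlr is resetting are queued, and that the
 * queued I/Os are retried one by one, in submission order, after the qpair is
 * reconnected.
 */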
5481 static void
5482 test_retry_io_if_ctrlr_is_resetting(void)
5483 {
5484 	struct nvme_path_id path = {};
5485 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
5486 	struct spdk_nvme_ctrlr *ctrlr;
5487 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5488 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5489 	struct nvme_ctrlr *nvme_ctrlr;
5490 	const int STRING_SIZE = 32;
5491 	const char *attached_names[STRING_SIZE];
5492 	struct nvme_bdev *bdev;
5493 	struct nvme_ns *nvme_ns;
5494 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5495 	struct spdk_io_channel *ch;
5496 	struct nvme_bdev_channel *nbdev_ch;
5497 	struct nvme_io_path *io_path;
5498 	struct nvme_qpair *nvme_qpair;
5499 	int rc;
5500 
5501 	g_opts.bdev_retry_count = 1;
5502 
5503 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5504 	ut_init_trid(&path.trid);
5505 
5506 	set_thread(0);
5507 
5508 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5509 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5510 
5511 	g_ut_attach_ctrlr_status = 0;
5512 	g_ut_attach_bdev_count = 1;
5513 
5514 	opts.ctrlr_loss_timeout_sec = -1;
5515 	opts.reconnect_delay_sec = 1;
5516 	opts.multipath = false;
5517 
5518 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5519 				   attach_ctrlr_done, NULL, &dopts, &opts);
5520 	CU_ASSERT(rc == 0);
5521 
5522 	spdk_delay_us(1000);
5523 	poll_threads();
5524 
5525 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5526 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5527 
5528 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
5529 	CU_ASSERT(nvme_ctrlr != NULL);
5530 
5531 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5532 	CU_ASSERT(bdev != NULL);
5533 
5534 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5535 	CU_ASSERT(nvme_ns != NULL);
5536 
5537 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5538 	ut_bdev_io_set_buf(bdev_io1);
5539 
5540 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5541 	ut_bdev_io_set_buf(bdev_io2);
5542 
5543 	ch = spdk_get_io_channel(bdev);
5544 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5545 
5546 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5547 
5548 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5549 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5550 
5551 	nvme_qpair = io_path->qpair;
5552 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5553 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5554 
5555 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5556 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5557 
5558 	/* If qpair is connected, I/O should succeed. */
5559 	bdev_io1->internal.f.in_submit_request = true;
5560 
5561 	bdev_nvme_submit_request(ch, bdev_io1);
5562 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5563 
5564 	poll_threads();
5565 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
5566 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5567 
5568 	/* If qpair is disconnected, it is freed and then reconnected via resetting
5569 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
5570 	 * while resetting the nvme_ctrlr.
5571 	 */
5572 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5573 	ctrlr->is_failed = true;
5574 
5575 	poll_thread_times(0, 5);
5576 
5577 	CU_ASSERT(nvme_qpair->qpair == NULL);
5578 	CU_ASSERT(nvme_ctrlr->resetting == true);
5579 	CU_ASSERT(ctrlr->is_failed == false);
5580 
5581 	bdev_io1->internal.f.in_submit_request = true;
5582 
5583 	bdev_nvme_submit_request(ch, bdev_io1);
5584 
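	/* Stagger the second submission by 1 us so that the two queued I/Os get
	 * distinct retry deadlines.
	 */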
5585 	spdk_delay_us(1);
5586 
5587 	bdev_io2->internal.f.in_submit_request = true;
5588 
5589 	bdev_nvme_submit_request(ch, bdev_io2);
5590 
5591 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5592 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5593 	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5594 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(
5595 			  TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx,
5596 				     retry_link)));
5597 
5598 	poll_threads();
5599 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5600 	poll_threads();
5601 
5602 	CU_ASSERT(nvme_qpair->qpair != NULL);
5603 	CU_ASSERT(nvme_ctrlr->resetting == false);
5604 
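	/* Together with the 1 us stagger and the admin queue poll delay above, this
	 * brings the elapsed time to exactly one second since bdev_io1 was queued.
	 * Hence only bdev_io1's retry fires on the next poll, while bdev_io2,
	 * queued 1 us later, stays on the retry list.
	 */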
5605 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5606 
5607 	poll_thread_times(0, 1);
5608 
5609 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5610 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5611 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5612 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5613 
5614 	poll_threads();
5615 
5616 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5617 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
5618 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5619 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5620 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5621 
5622 	spdk_delay_us(1);
5623 
5624 	poll_thread_times(0, 1);
5625 
5626 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5627 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5628 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5629 
5630 	poll_threads();
5631 
5632 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5633 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == false);
5634 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5635 
5636 	free(bdev_io1);
5637 	free(bdev_io2);
5638 
5639 	spdk_put_io_channel(ch);
5640 
5641 	poll_threads();
5642 
5643 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5644 	CU_ASSERT(rc == 0);
5645 
5646 	poll_threads();
5647 	spdk_delay_us(1000);
5648 	poll_threads();
5649 
5650 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5651 
5652 	g_opts.bdev_retry_count = 0;
5653 }
5654 
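/* Verify delayed reconnect: with ctrlr_loss_timeout_sec = 2 and reconnect_delay_sec = 1,
 * a failed reset arms a reconnect timer, a reconnect retry may succeed or fail again,
 * and repeated failures past the ctrlr loss timeout cause the ctrlr to be deleted.
 */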
5655 static void
5656 test_reconnect_ctrlr(void)
5657 {
5658 	struct spdk_nvme_transport_id trid = {};
5659 	struct spdk_nvme_ctrlr ctrlr = {};
5660 	struct nvme_ctrlr *nvme_ctrlr;
5661 	struct spdk_io_channel *ch1, *ch2;
5662 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5663 	int rc;
5664 
5665 	ut_init_trid(&trid);
5666 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5667 
5668 	set_thread(0);
5669 
5670 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5671 	CU_ASSERT(rc == 0);
5672 
5673 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5674 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5675 
5676 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5677 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5678 
5679 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5680 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5681 
5682 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5683 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5684 
5685 	set_thread(1);
5686 
5687 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5688 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5689 
5690 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5691 
5692 	/* Reset starts from thread 1. */
5693 	set_thread(1);
5694 
5695 	/* The reset should fail and a reconnect timer should be registered. */
5696 	ctrlr.fail_reset = true;
5697 	ctrlr.is_failed = true;
5698 
5699 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5700 	CU_ASSERT(rc == 0);
5701 	CU_ASSERT(nvme_ctrlr->resetting == true);
5702 	CU_ASSERT(ctrlr.is_failed == true);
5703 
5704 	poll_threads();
5705 
5706 	CU_ASSERT(nvme_ctrlr->resetting == false);
5707 	CU_ASSERT(ctrlr.is_failed == false);
5708 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5709 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5710 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5711 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5712 
5713 	/* A new reset starts from thread 1. */
5714 	set_thread(1);
5715 
5716 	/* The reset should cancel the reconnect timer and start reconnecting immediately.
5717 	 * Then, the reset should fail and a reconnect timer should be registered again.
5718 	 */
5719 	ctrlr.fail_reset = true;
5720 	ctrlr.is_failed = true;
5721 
5722 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5723 	CU_ASSERT(rc == 0);
5724 	CU_ASSERT(nvme_ctrlr->resetting == true);
5725 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5726 	CU_ASSERT(ctrlr.is_failed == true);
5727 
5728 	poll_threads();
5729 
5730 	CU_ASSERT(nvme_ctrlr->resetting == false);
5731 	CU_ASSERT(ctrlr.is_failed == false);
5732 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5733 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5734 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5735 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5736 
5737 	/* Then a reconnect retry should succeed. */
5738 	ctrlr.fail_reset = false;
5739 
5740 	spdk_delay_us(SPDK_SEC_TO_USEC);
5741 	poll_thread_times(0, 1);
5742 
5743 	CU_ASSERT(nvme_ctrlr->resetting == true);
5744 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5745 
5746 	poll_threads();
5747 
5748 	CU_ASSERT(nvme_ctrlr->resetting == false);
5749 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5750 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5751 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5752 
5753 	/* The reset should fail and a reconnect timer should be registered. */
5754 	ctrlr.fail_reset = true;
5755 	ctrlr.is_failed = true;
5756 
5757 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5758 	CU_ASSERT(rc == 0);
5759 	CU_ASSERT(nvme_ctrlr->resetting == true);
5760 	CU_ASSERT(ctrlr.is_failed == true);
5761 
5762 	poll_threads();
5763 
5764 	CU_ASSERT(nvme_ctrlr->resetting == false);
5765 	CU_ASSERT(ctrlr.is_failed == false);
5766 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5767 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5768 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5769 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5770 
5771 	/* Then a reconnect retry should still fail. */
5772 	spdk_delay_us(SPDK_SEC_TO_USEC);
5773 	poll_thread_times(0, 1);
5774 
5775 	CU_ASSERT(nvme_ctrlr->resetting == true);
5776 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5777 
5778 	poll_threads();
5779 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5780 	poll_threads();
5781 
5782 	CU_ASSERT(nvme_ctrlr->resetting == false);
5783 	CU_ASSERT(ctrlr.is_failed == false);
5784 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5785 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5786 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5787 
5788 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5789 	spdk_delay_us(SPDK_SEC_TO_USEC);
5790 	poll_threads();
5791 
5792 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5793 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5794 	CU_ASSERT(nvme_ctrlr->destruct == true);
5795 
5796 	spdk_put_io_channel(ch2);
5797 
5798 	set_thread(0);
5799 
5800 	spdk_put_io_channel(ch1);
5801 
5802 	poll_threads();
5803 	spdk_delay_us(1000);
5804 	poll_threads();
5805 
5806 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5807 }
5808 
5809 static struct nvme_path_id *
5810 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5811 		       const struct spdk_nvme_transport_id *trid)
5812 {
5813 	struct nvme_path_id *p;
5814 
5815 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5816 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5817 			break;
5818 		}
5819 	}
5820 
5821 	return p;
5822 }
5823 
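/* Verify failover across multiple trids while a reconnect is delayed: connecting to
 * every trid fails first, removing the active trid switches active_path_id without
 * starting a new reset, and a later successful reconnect makes the new active path
 * usable.
 */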
5824 static void
5825 test_retry_failover_ctrlr(void)
5826 {
5827 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5828 	struct spdk_nvme_ctrlr ctrlr = {};
5829 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5830 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5831 	struct spdk_io_channel *ch;
5832 	struct nvme_ctrlr_channel *ctrlr_ch;
5833 	int rc;
5834 
5835 	ut_init_trid(&trid1);
5836 	ut_init_trid2(&trid2);
5837 	ut_init_trid3(&trid3);
5838 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5839 
5840 	set_thread(0);
5841 
5842 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5843 	CU_ASSERT(rc == 0);
5844 
5845 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5846 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5847 
5848 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5849 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5850 
5851 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5852 	CU_ASSERT(rc == 0);
5853 
5854 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5855 	CU_ASSERT(rc == 0);
5856 
5857 	ch = spdk_get_io_channel(nvme_ctrlr);
5858 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5859 
5860 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5861 
5862 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5863 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5864 	CU_ASSERT(path_id1->last_failed_tsc == 0);
5865 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5866 
5867 	/* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */
5868 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5869 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5870 
5871 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5872 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5873 
5874 	/* It is expected that connecting to all of trid1, trid2, and trid3 fails,
5875 	 * and a reconnect timer is started. */
5876 	ctrlr.fail_reset = true;
5877 	ctrlr.is_failed = true;
5878 
5879 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5880 	CU_ASSERT(rc == 0);
5881 
5882 	poll_threads();
5883 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5884 	poll_threads();
5885 
5886 	CU_ASSERT(nvme_ctrlr->resetting == false);
5887 	CU_ASSERT(ctrlr.is_failed == false);
5888 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5889 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5890 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5891 	CU_ASSERT(path_id1->last_failed_tsc != 0);
5892 
5893 	CU_ASSERT(path_id2->last_failed_tsc != 0);
5894 	CU_ASSERT(path_id3->last_failed_tsc != 0);
5895 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5896 
5897 	/* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is
5898 	 * switched to trid2 but reset is not started.
5899 	 */
5900 	rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true);
5901 	CU_ASSERT(rc == -EALREADY);
5902 
5903 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL);
5904 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5905 
5906 	CU_ASSERT(nvme_ctrlr->resetting == false);
5907 
5908 	/* If reconnect succeeds, trid2 should be the active path_id */
5909 	ctrlr.fail_reset = false;
5910 
5911 	spdk_delay_us(SPDK_SEC_TO_USEC);
5912 	poll_thread_times(0, 1);
5913 
5914 	CU_ASSERT(nvme_ctrlr->resetting == true);
5915 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5916 
5917 	poll_threads();
5918 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5919 	poll_threads();
5920 
5921 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL);
5922 	CU_ASSERT(path_id2->last_failed_tsc == 0);
5923 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5924 	CU_ASSERT(nvme_ctrlr->resetting == false);
5925 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5926 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5927 
5928 	spdk_put_io_channel(ch);
5929 
5930 	poll_threads();
5931 
5932 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5933 	CU_ASSERT(rc == 0);
5934 
5935 	poll_threads();
5936 	spdk_delay_us(1000);
5937 	poll_threads();
5938 
5939 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5940 }
5941 
5942 static void
5943 test_fail_path(void)
5944 {
5945 	struct nvme_path_id path = {};
5946 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
5947 	struct spdk_nvme_ctrlr *ctrlr;
5948 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5949 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5950 	struct nvme_ctrlr *nvme_ctrlr;
5951 	const int STRING_SIZE = 32;
5952 	const char *attached_names[STRING_SIZE];
5953 	struct nvme_bdev *bdev;
5954 	struct nvme_ns *nvme_ns;
5955 	struct spdk_bdev_io *bdev_io;
5956 	struct spdk_io_channel *ch;
5957 	struct nvme_bdev_channel *nbdev_ch;
5958 	struct nvme_io_path *io_path;
5959 	struct nvme_ctrlr_channel *ctrlr_ch;
5960 	int rc;
5961 
5962 	/* The test scenario is as follows.
5963 	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5964 	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
5965 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5966 	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
5967 	 *   expires first. The queued I/O is failed.
5968 	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
5969 	 * - Then ctrlr_loss_timeout_sec expires and the ctrlr is deleted.
5970 	 */
5971 
5972 	g_opts.bdev_retry_count = 1;
5973 
5974 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5975 	ut_init_trid(&path.trid);
5976 
5977 	set_thread(0);
5978 
5979 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5980 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5981 
5982 	g_ut_attach_ctrlr_status = 0;
5983 	g_ut_attach_bdev_count = 1;
5984 
5985 	opts.ctrlr_loss_timeout_sec = 4;
5986 	opts.reconnect_delay_sec = 1;
5987 	opts.fast_io_fail_timeout_sec = 2;
5988 	opts.multipath = false;
5989 
5990 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5991 				   attach_ctrlr_done, NULL, &dopts, &opts);
5992 	CU_ASSERT(rc == 0);
5993 
5994 	spdk_delay_us(1000);
5995 	poll_threads();
5996 
5997 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5998 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5999 
6000 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
6001 	CU_ASSERT(nvme_ctrlr != NULL);
6002 
6003 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6004 	CU_ASSERT(bdev != NULL);
6005 
6006 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
6007 	CU_ASSERT(nvme_ns != NULL);
6008 
6009 	ch = spdk_get_io_channel(bdev);
6010 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6011 
6012 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6013 
6014 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
6015 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6016 
6017 	ctrlr_ch = io_path->qpair->ctrlr_ch;
6018 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
6019 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
6020 
6021 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6022 	ut_bdev_io_set_buf(bdev_io);
6023
6025 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
6026 	ctrlr->fail_reset = true;
6027 	ctrlr->is_failed = true;
6028 
6029 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
6030 	CU_ASSERT(rc == 0);
6031 	CU_ASSERT(nvme_ctrlr->resetting == true);
6032 	CU_ASSERT(ctrlr->is_failed == true);
6033 
6034 	poll_threads();
6035 
6036 	CU_ASSERT(nvme_ctrlr->resetting == false);
6037 	CU_ASSERT(ctrlr->is_failed == false);
6038 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6039 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6040 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
6041 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
6042 
6043 	/* I/O should be queued. */
6044 	bdev_io->internal.f.in_submit_request = true;
6045 
6046 	bdev_nvme_submit_request(ch, bdev_io);
6047 
6048 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6049 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6050 
6051 	/* After a second, the I/O should still be queued and the ctrlr should
6052 	 * still be recovering.
6053 	 */
6054 	spdk_delay_us(SPDK_SEC_TO_USEC);
6055 	poll_threads();
6056 
6057 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6058 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6059 
6060 	CU_ASSERT(nvme_ctrlr->resetting == false);
6061 	CU_ASSERT(ctrlr->is_failed == false);
6062 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6063 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6064 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
6065 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
6068 
6069 	/* After two seconds, fast_io_fail_timeout_sec should expire. */
6070 	spdk_delay_us(SPDK_SEC_TO_USEC);
6071 	poll_threads();
6072 
6073 	CU_ASSERT(nvme_ctrlr->resetting == false);
6074 	CU_ASSERT(ctrlr->is_failed == false);
6075 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6076 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6077 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
6078 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
6079 
6080 	/* Then within a second, pending I/O should be failed. */
6081 	spdk_delay_us(SPDK_SEC_TO_USEC);
6082 	poll_threads();
6083 
6084 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6085 	poll_threads();
6086 
6087 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6088 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
6089 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
6090 
6091 	/* Another I/O submission should fail immediately. */
6092 	bdev_io->internal.f.in_submit_request = true;
6093 
6094 	bdev_nvme_submit_request(ch, bdev_io);
6095 
6096 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6097 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
6098 
6099 	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr should
6100 	 * be deleted.
6101 	 */
6102 	spdk_delay_us(SPDK_SEC_TO_USEC);
6103 	poll_threads();
6104 
6105 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6106 	poll_threads();
6107 
6108 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
6109 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
6110 	CU_ASSERT(nvme_ctrlr->destruct == true);
6111 
6112 	spdk_put_io_channel(ch);
6113 
6114 	poll_threads();
6115 	spdk_delay_us(1000);
6116 	poll_threads();
6117 
6118 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6119 
6120 	free(bdev_io);
6121 
6122 	g_opts.bdev_retry_count = 0;
6123 }
6124 
6125 static void
6126 test_nvme_ns_cmp(void)
6127 {
6128 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
6129 
6130 	nvme_ns1.id = 0;
6131 	nvme_ns2.id = UINT32_MAX;
6132 
6133 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
6134 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
6135 }
6136 
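/* Verify the handling of the ANATT (ANA transition time) timer when a namespace's
 * ANA state is set: a pending timeout is canceled on leaving the change state, the
 * timer is started on entering the change state, and its expiration marks the
 * transition as timed out.
 */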
6137 static void
6138 test_ana_transition(void)
6139 {
6140 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
6141 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
6142 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
6143 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
6144 
6145 	/* case 1: ANA transition timeout is canceled. */
6146 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6147 	nvme_ns.ana_transition_timedout = true;
6148 
6149 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6150 
6151 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6152 
6153 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
6154 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6155 
6156 	/* case 2: ANATT timer is kept. */
6157 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6158 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
6159 			      &nvme_ns,
6160 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6161 
6162 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6163 
6164 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6165 
6166 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6167 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
6168 
6169 	/* case 3: ANATT timer is stopped. */
6170 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6171 
6172 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6173 
6174 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6175 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6176 
6177 	/* case 4: ANATT timer is started. */
6178 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6179 
6180 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6181 
6182 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6183 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
6184 
6185 	/* case 5: ANATT timer is expired. */
6186 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6187 
6188 	poll_threads();
6189 
6190 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6191 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
6192 }
6193 
6194 static void
6195 _set_preferred_path_cb(void *cb_arg, int rc)
6196 {
6197 	bool *done = cb_arg;
6198 
6199 	*done = true;
6200 }
6201 
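/* Verify bdev_nvme_set_preferred_path(): the preferred io_path is applied to an
 * existing I/O channel dynamically, and is also honored by a newly acquired
 * I/O channel.
 */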
6202 static void
6203 test_set_preferred_path(void)
6204 {
6205 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
6206 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
6207 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6208 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6209 	const int STRING_SIZE = 32;
6210 	const char *attached_names[STRING_SIZE];
6211 	struct nvme_bdev *bdev;
6212 	struct spdk_io_channel *ch;
6213 	struct nvme_bdev_channel *nbdev_ch;
6214 	struct nvme_io_path *io_path;
6215 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6216 	const struct spdk_nvme_ctrlr_data *cdata;
6217 	bool done;
6218 	int rc;
6219 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
6220 
6221 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
6222 	bdev_opts.multipath = true;
6223 
6224 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6225 	ut_init_trid(&path1.trid);
6226 	ut_init_trid2(&path2.trid);
6227 	ut_init_trid3(&path3.trid);
6228 	g_ut_attach_ctrlr_status = 0;
6229 	g_ut_attach_bdev_count = 1;
6230 
6231 	set_thread(0);
6232 
6233 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6234 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6235 
6236 	ctrlr1->ns[0].uuid = &uuid1;
6237 
6238 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6239 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6240 	CU_ASSERT(rc == 0);
6241 
6242 	spdk_delay_us(1000);
6243 	poll_threads();
6244 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6245 	poll_threads();
6246 
6247 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6248 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6249 
6250 	ctrlr2->ns[0].uuid = &uuid1;
6251 
6252 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6253 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6254 	CU_ASSERT(rc == 0);
6255 
6256 	spdk_delay_us(1000);
6257 	poll_threads();
6258 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6259 	poll_threads();
6260 
6261 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
6262 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
6263 
6264 	ctrlr3->ns[0].uuid = &uuid1;
6265 
6266 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
6267 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6268 	CU_ASSERT(rc == 0);
6269 
6270 	spdk_delay_us(1000);
6271 	poll_threads();
6272 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6273 	poll_threads();
6274 
6275 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6276 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6277 
6278 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6279 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6280 
6281 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6282 
6283 	ch = spdk_get_io_channel(bdev);
6284 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6285 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6286 
6287 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6288 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6289 
6290 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6291 
6292 	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
6293 	 * should return io_path to ctrlr2.
6294 	 */
6295 
6296 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
6297 	done = false;
6298 
6299 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6300 
6301 	poll_threads();
6302 	CU_ASSERT(done == true);
6303 
6304 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6305 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6306 
6307 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6308 
6309 	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
6310 	 * acquired, find_io_path() should return io_path to ctrlr3.
6311 	 */
6312 
6313 	spdk_put_io_channel(ch);
6314 
6315 	poll_threads();
6316 
6317 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
6318 	done = false;
6319 
6320 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6321 
6322 	poll_threads();
6323 	CU_ASSERT(done == true);
6324 
6325 	ch = spdk_get_io_channel(bdev);
6326 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6327 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6328 
6329 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6330 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6331 
6332 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
6333 
6334 	spdk_put_io_channel(ch);
6335 
6336 	poll_threads();
6337 
6338 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6339 	CU_ASSERT(rc == 0);
6340 
6341 	poll_threads();
6342 	spdk_delay_us(1000);
6343 	poll_threads();
6344 
6345 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6346 }
6347 
6348 static void
6349 test_find_next_io_path(void)
6350 {
6351 	struct nvme_bdev_channel nbdev_ch = {
6352 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6353 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6354 		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
6355 	};
6356 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6357 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6358 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6359 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6360 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6361 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6362 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6363 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6364 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6365 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6366 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6367 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6368 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6369 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6370 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6371 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6372 
6373 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6374 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6375 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6376 
6377 	/* Test the case where nbdev_ch->current_io_path is set. The case of current_io_path == NULL
6378 	 * is covered in test_find_io_path.
6379 	 */
6380 
6381 	nbdev_ch.current_io_path = &io_path2;
6382 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6383 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6384 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6385 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6386 
6387 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6388 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6389 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6390 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6391 
6392 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6393 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6394 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6395 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6396 
6397 	nbdev_ch.current_io_path = &io_path3;
6398 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6399 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6400 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6401 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6402 
6403 	/* Test that the next io_path is selected according to rr_min_io. */
6404 
6405 	nbdev_ch.current_io_path = NULL;
6406 	nbdev_ch.rr_min_io = 2;
6407 	nbdev_ch.rr_counter = 0;
6408 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6409 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6410 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6411 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6412 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6413 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6414 
6415 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6416 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6417 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6418 }
6419 
6420 static void
6421 test_find_io_path_min_qd(void)
6422 {
6423 	struct nvme_bdev_channel nbdev_ch = {
6424 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6425 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6426 		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
6427 	};
6428 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6429 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6430 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6431 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6432 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6433 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6434 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6435 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6436 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6437 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6438 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6439 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6440 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6441 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6442 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6443 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6444 
6445 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6446 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6447 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6448 
6449 	/* Test that the ANA optimized state is prioritized first, and then the minimum
6450 	 * number of outstanding requests, when using the least queue depth selector.
6451 	 */
6452 	qpair1.num_outstanding_reqs = 2;
6453 	qpair2.num_outstanding_reqs = 1;
6454 	qpair3.num_outstanding_reqs = 0;
6455 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6456 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6457 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6458 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6459 
6460 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6461 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6462 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6463 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6464 
6465 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6466 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6467 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6468 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6469 
6470 	qpair2.num_outstanding_reqs = 4;
6471 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6472 }
6473 
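/* Verify that, with disable_auto_failback set, I/O does not fail back to the first
 * path automatically after that path recovers; failback happens only when the path
 * is explicitly set to preferred again.
 */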
6474 static void
6475 test_disable_auto_failback(void)
6476 {
6477 	struct nvme_path_id path1 = {}, path2 = {};
6478 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
6479 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6480 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6481 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6482 	struct nvme_ctrlr *nvme_ctrlr1;
6483 	const int STRING_SIZE = 32;
6484 	const char *attached_names[STRING_SIZE];
6485 	struct nvme_bdev *bdev;
6486 	struct spdk_io_channel *ch;
6487 	struct nvme_bdev_channel *nbdev_ch;
6488 	struct nvme_io_path *io_path;
6489 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6490 	const struct spdk_nvme_ctrlr_data *cdata;
6491 	bool done;
6492 	int rc;
6493 
6494 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6495 	ut_init_trid(&path1.trid);
6496 	ut_init_trid2(&path2.trid);
6497 	g_ut_attach_ctrlr_status = 0;
6498 	g_ut_attach_bdev_count = 1;
6499 
6500 	g_opts.disable_auto_failback = true;
6501 
6502 	opts.ctrlr_loss_timeout_sec = -1;
6503 	opts.reconnect_delay_sec = 1;
6504 	opts.multipath = true;
6505 
6506 	set_thread(0);
6507 
6508 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6509 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6510 
6511 	ctrlr1->ns[0].uuid = &uuid1;
6512 
6513 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6514 				   attach_ctrlr_done, NULL, &dopts, &opts);
6515 	CU_ASSERT(rc == 0);
6516 
6517 	spdk_delay_us(1000);
6518 	poll_threads();
6519 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6520 	poll_threads();
6521 
6522 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6523 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6524 
6525 	ctrlr2->ns[0].uuid = &uuid1;
6526 
6527 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6528 				   attach_ctrlr_done, NULL, &dopts, &opts);
6529 	CU_ASSERT(rc == 0);
6530 
6531 	spdk_delay_us(1000);
6532 	poll_threads();
6533 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6534 	poll_threads();
6535 
6536 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6537 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6538 
6539 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6540 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6541 
6542 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn);
6543 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6544 
6545 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6546 
6547 	ch = spdk_get_io_channel(bdev);
6548 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6549 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6550 
6551 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6552 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6553 
6554 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6555 
6556 	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
6557 	ctrlr1->fail_reset = true;
6558 	ctrlr1->is_failed = true;
6559 
6560 	bdev_nvme_reset_ctrlr(nvme_ctrlr1);
6561 
6562 	poll_threads();
6563 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6564 	poll_threads();
6565 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6566 	poll_threads();
6567 
6568 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6569 
6570 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6571 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6572 
6573 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6574 
6575 	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
6576 	 * Hence, io_path to ctrlr2 should still be used.
6577 	 */
6578 	ctrlr1->fail_reset = false;
6579 
6580 	spdk_delay_us(SPDK_SEC_TO_USEC);
6581 	poll_threads();
6582 
6583 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6584 
6585 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6586 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6587 
6588 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6589 
6590 	/* Set the io_path to ctrlr1 as preferred explicitly. Then the io_path to ctrlr1
6591 	 * should be used again.
6592 	 */
6593 
6594 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6595 	done = false;
6596 
6597 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6598 
6599 	poll_threads();
6600 	CU_ASSERT(done == true);
6601 
6602 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6603 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6604 
6605 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6606 
6607 	spdk_put_io_channel(ch);
6608 
6609 	poll_threads();
6610 
6611 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6612 	CU_ASSERT(rc == 0);
6613 
6614 	poll_threads();
6615 	spdk_delay_us(1000);
6616 	poll_threads();
6617 
6618 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6619 
6620 	g_opts.disable_auto_failback = false;
6621 }
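
/* The preferred-path update above completes asynchronously. A minimal sketch of
 * the wait pattern these tests rely on, using only the ut_multithread helpers
 * included at the top of this file; ut_wait_for_flag() is a hypothetical helper,
 * not part of the test suite.
 */
#if 0
static void
ut_wait_for_flag(bool *flag, uint64_t step_us, int max_iters)
{
	/* Hypothetical helper: drive all unit-test threads until the callback
	 * sets *flag or we give up.
	 */
	for (int i = 0; i < max_iters && !*flag; i++) {
		poll_threads();
		spdk_delay_us(step_us);
	}

	CU_ASSERT(*flag == true);
}
#endif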
6622 
6623 static void
6624 ut_set_multipath_policy_done(void *cb_arg, int rc)
6625 {
6626 	int *done = cb_arg;
6627 
6628 	SPDK_CU_ASSERT_FATAL(done != NULL);
6629 	*done = rc;
6630 }
6631 
6632 static void
6633 test_set_multipath_policy(void)
6634 {
6635 	struct nvme_path_id path1 = {}, path2 = {};
6636 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
6637 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6638 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6639 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6640 	const int STRING_SIZE = 32;
6641 	const char *attached_names[STRING_SIZE];
6642 	struct nvme_bdev *bdev;
6643 	struct spdk_io_channel *ch;
6644 	struct nvme_bdev_channel *nbdev_ch;
6645 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6646 	int done;
6647 	int rc;
6648 
6649 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6650 	ut_init_trid(&path1.trid);
6651 	ut_init_trid2(&path2.trid);
6652 	g_ut_attach_ctrlr_status = 0;
6653 	g_ut_attach_bdev_count = 1;
6654 
6655 	g_opts.disable_auto_failback = true;
6656 
6657 	opts.ctrlr_loss_timeout_sec = -1;
6658 	opts.reconnect_delay_sec = 1;
6659 	opts.multipath = true;
6660 
6661 	set_thread(0);
6662 
6663 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6664 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6665 
6666 	ctrlr1->ns[0].uuid = &uuid1;
6667 
6668 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6669 				   attach_ctrlr_done, NULL, &dopts, &opts);
6670 	CU_ASSERT(rc == 0);
6671 
6672 	spdk_delay_us(1000);
6673 	poll_threads();
6674 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6675 	poll_threads();
6676 
6677 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6678 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6679 
6680 	ctrlr2->ns[0].uuid = &uuid1;
6681 
6682 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6683 				   attach_ctrlr_done, NULL, &dopts, &opts);
6684 	CU_ASSERT(rc == 0);
6685 
6686 	spdk_delay_us(1000);
6687 	poll_threads();
6688 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6689 	poll_threads();
6690 
6691 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6692 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6693 
6694 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6695 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6696 
6697 	/* If the multipath policy is updated before any I/O channel is created,
6698 	 * a newly created I/O channel should reflect the update.
6699 	 */
6700 	done = -1;
6701 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6702 					    BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX,
6703 					    ut_set_multipath_policy_done, &done);
6704 	poll_threads();
6705 	CU_ASSERT(done == 0);
6706 
6707 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6708 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6709 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6710 
6711 	ch = spdk_get_io_channel(bdev);
6712 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6713 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6714 
6715 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6716 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6717 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6718 
6719 	/* If the multipath policy is updated while an I/O channel is active,
6720 	 * the update should be applied to the I/O channel immediately.
6721 	 */
6722 	done = -1;
6723 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6724 					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX,
6725 					    ut_set_multipath_policy_done, &done);
6726 	poll_threads();
6727 	CU_ASSERT(done == 0);
6728 
6729 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6730 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6731 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6732 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6733 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6734 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6735 
6736 	spdk_put_io_channel(ch);
6737 
6738 	poll_threads();
6739 
6740 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6741 	CU_ASSERT(rc == 0);
6742 
6743 	poll_threads();
6744 	spdk_delay_us(1000);
6745 	poll_threads();
6746 
6747 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6748 }
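
/* How a channel picks up the policy in the two cases above: a newly created
 * channel copies the settings from the nvme_bdev, while live channels are
 * updated in place. A minimal sketch of the in-place update, assuming an
 * spdk_for_each_channel()-style walk; the real update path in bdev_nvme.c may
 * differ in detail.
 */
#if 0
static void
ut_apply_mp_policy_msg(struct spdk_io_channel_iter *i)
{
	struct nvme_bdev *nbdev = spdk_io_channel_iter_get_io_device(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);

	/* Mirror the bdev-level settings into this channel's cached copy. */
	nbdev_ch->mp_policy = nbdev->mp_policy;
	nbdev_ch->mp_selector = nbdev->mp_selector;
	nbdev_ch->rr_min_io = nbdev->rr_min_io;

	spdk_for_each_channel_continue(i, 0);
}
#endif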
6749 
6750 static void
6751 test_uuid_generation(void)
6752 {
6753 	uint32_t nsid1 = 1, nsid2 = 2;
6754 	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
6755 	char sn3[21] = "                    ";
6756 	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
6757 	struct spdk_uuid uuid1, uuid2;
6758 	int rc;
6759 
6760 	/* Test case 1:
6761 	 * Serial numbers are the same, nsids are different.
6762 	 * Compare two generated UUID - they should be different. */
6763 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6764 	CU_ASSERT(rc == 0);
6765 	rc = nvme_generate_uuid(sn1, nsid2, &uuid2);
6766 	CU_ASSERT(rc == 0);
6767 
6768 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6769 
6770 	/* Test case 2:
6771 	 * Serial numbers differ only by one character, nsids are the same.
6772 	 * Compare two generated UUID - they should be different. */
6773 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6774 	CU_ASSERT(rc == 0);
6775 	rc = nvme_generate_uuid(sn2, nsid1, &uuid2);
6776 	CU_ASSERT(rc == 0);
6777 
6778 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6779 
6780 	/* Test case 3:
6781 	 * Serial number consists only of space characters.
6782 	 * Validate the generated UUID. */
6783 	rc = nvme_generate_uuid(sn3, nsid1, &uuid1);
6784 	CU_ASSERT(rc == 0);
6785 	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
6787 }
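
/* The property checked above is that nvme_generate_uuid() is deterministic in
 * (serial number, nsid). A minimal sketch of one way to build such a name-based
 * UUID with SPDK's public API; the namespace UUID string below is a made-up
 * constant and the real helper may combine the inputs differently.
 */
#if 0
static int
ut_name_based_uuid(const char *sn, uint32_t nsid, struct spdk_uuid *out)
{
	struct spdk_uuid ns_uuid;
	char name[64];
	int rc;

	/* Hypothetical, fixed namespace UUID for the SHA-1 name-based scheme. */
	rc = spdk_uuid_parse(&ns_uuid, "00000000-0000-0000-0000-000000000001");
	if (rc != 0) {
		return rc;
	}

	/* Concatenate the inputs so distinct (sn, nsid) pairs give distinct names. */
	snprintf(name, sizeof(name), "%s%" PRIu32, sn, nsid);

	return spdk_uuid_generate_sha1(out, &ns_uuid, name, strlen(name));
}
#endif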
6788 
6789 static void
6790 test_retry_io_to_same_path(void)
6791 {
6792 	struct nvme_path_id path1 = {}, path2 = {};
6793 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6794 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6795 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6796 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
6797 	const int STRING_SIZE = 32;
6798 	const char *attached_names[STRING_SIZE];
6799 	struct nvme_bdev *bdev;
6800 	struct spdk_bdev_io *bdev_io;
6801 	struct nvme_bdev_io *bio;
6802 	struct spdk_io_channel *ch;
6803 	struct nvme_bdev_channel *nbdev_ch;
6804 	struct nvme_io_path *io_path1, *io_path2;
6805 	struct ut_nvme_req *req;
6806 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6807 	int done;
6808 	int rc;
6809 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
6810 
6811 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
6812 	bdev_opts.multipath = true;
6813 
6814 	g_opts.nvme_ioq_poll_period_us = 1;
6815 
6816 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6817 	ut_init_trid(&path1.trid);
6818 	ut_init_trid2(&path2.trid);
6819 	g_ut_attach_ctrlr_status = 0;
6820 	g_ut_attach_bdev_count = 1;
6821 
6822 	set_thread(0);
6823 
6824 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6825 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6826 
6827 	ctrlr1->ns[0].uuid = &uuid1;
6828 
6829 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6830 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6831 	CU_ASSERT(rc == 0);
6832 
6833 	spdk_delay_us(1000);
6834 	poll_threads();
6835 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6836 	poll_threads();
6837 
6838 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6839 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6840 
6841 	ctrlr2->ns[0].uuid = &uuid1;
6842 
6843 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6844 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6845 	CU_ASSERT(rc == 0);
6846 
6847 	spdk_delay_us(1000);
6848 	poll_threads();
6849 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6850 	poll_threads();
6851 
6852 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6853 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6854 
6855 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
6856 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6857 
6858 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
6859 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6860 
6861 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6862 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6863 
6864 	done = -1;
6865 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6866 					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
6867 	poll_threads();
6868 	CU_ASSERT(done == 0);
6869 
6870 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6871 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6872 	CU_ASSERT(bdev->rr_min_io == 1);
6873 
6874 	ch = spdk_get_io_channel(bdev);
6875 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6876 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6877 
6878 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6879 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6880 	CU_ASSERT(nbdev_ch->rr_min_io == 1);
6881 
6882 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6883 	ut_bdev_io_set_buf(bdev_io);
6884 
6885 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
6886 
6887 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
6888 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
6889 
6890 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
6891 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
6892 
6893 	/* The 1st I/O should be submitted to io_path1. */
6894 	bdev_io->internal.f.in_submit_request = true;
6895 
6896 	bdev_nvme_submit_request(ch, bdev_io);
6897 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6898 	CU_ASSERT(bio->io_path == io_path1);
6899 	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);
6900 
6901 	spdk_delay_us(1);
6902 
6903 	poll_threads();
6904 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6905 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6906 
6907 	/* The 2nd I/O should be submitted to io_path2 because the path selection
6908 	 * policy is round-robin.
6909 	 */
6910 	bdev_io->internal.f.in_submit_request = true;
6911 
6912 	bdev_nvme_submit_request(ch, bdev_io);
6913 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6914 	CU_ASSERT(bio->io_path == io_path2);
6915 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6916 
6917 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6918 	SPDK_CU_ASSERT_FATAL(req != NULL);
6919 
6920 	/* Set retry count to non-zero. */
6921 	g_opts.bdev_retry_count = 2;
6922 
6923 	/* Inject an I/O error. */
6924 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6925 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6926 
6927 	/* The 2nd I/O should be queued to nbdev_ch. */
6928 	spdk_delay_us(1);
6929 	poll_thread_times(0, 1);
6930 
6931 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6932 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6933 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6934 
6935 	/* The 2nd I/O should keep io_path2 cached. */
6936 	CU_ASSERT(bio->io_path == io_path2);
6937 
6938 	/* The 2nd I/O should be submitted to io_path2 again. */
6939 	poll_thread_times(0, 1);
6940 
6941 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6942 	CU_ASSERT(bio->io_path == io_path2);
6943 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6944 
6945 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6946 	SPDK_CU_ASSERT_FATAL(req != NULL);
6947 
6948 	/* Inject an I/O error again. */
6949 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6950 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6951 	req->cpl.status.crd = 1;
6952 
6953 	ctrlr2->cdata.crdt[1] = 1;
6954 
6955 	/* The 2nd I/O should be queued to nbdev_ch. */
6956 	spdk_delay_us(1);
6957 	poll_thread_times(0, 1);
6958 
6959 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6960 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6961 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6962 
6963 	/* The 2nd I/O should keep io_path2 cached. */
6964 	CU_ASSERT(bio->io_path == io_path2);
6965 
6966 	/* Detach ctrlr2 dynamically. */
6967 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
6968 	CU_ASSERT(rc == 0);
6969 
6970 	spdk_delay_us(1000);
6971 	poll_threads();
6972 	spdk_delay_us(1000);
6973 	poll_threads();
6974 	spdk_delay_us(1000);
6975 	poll_threads();
6976 	spdk_delay_us(1000);
6977 	poll_threads();
6978 
6979 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
6980 
6981 	poll_threads();
6982 	spdk_delay_us(100000);
6983 	poll_threads();
6984 	spdk_delay_us(1);
6985 	poll_threads();
6986 
6987 	/* The 2nd I/O should succeed via io_path1. */
6988 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6989 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6990 	CU_ASSERT(bio->io_path == io_path1);
6991 
6992 	free(bdev_io);
6993 
6994 	spdk_put_io_channel(ch);
6995 
6996 	poll_threads();
6997 	spdk_delay_us(1);
6998 	poll_threads();
6999 
7000 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7001 	CU_ASSERT(rc == 0);
7002 
7003 	poll_threads();
7004 	spdk_delay_us(1000);
7005 	poll_threads();
7006 
7007 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7008 
7009 	g_opts.nvme_ioq_poll_period_us = 0;
7010 	g_opts.bdev_retry_count = 0;
7011 }
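
/* The 100 ms delay polled above comes from Command Retry Delay: when a
 * completion carries a non-zero CRD field, the retry is deferred by the
 * matching Command Retry Delay Time from the controller data, in units of
 * 100 milliseconds. A minimal sketch of that computation, assuming the
 * crd-to-crdt[] pairing this test sets up (crd == 1 with cdata.crdt[1]);
 * see bdev_nvme.c for the authoritative mapping.
 */
#if 0
static uint64_t
ut_crd_delay_ms(const struct spdk_nvme_cpl *cpl,
		const struct spdk_nvme_ctrlr_data *cdata)
{
	if (cpl->status.crd == 0) {
		/* The controller did not request a retry delay. */
		return 0;
	}

	/* CRDT values are in 100 ms units, e.g. crdt == 1 -> 100 ms; the crd
	 * index here follows this test, not necessarily the production code.
	 */
	return (uint64_t)cdata->crdt[cpl->status.crd] * 100;
}
#endif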
7012 
7013 /* This case verifies the fix for a complex race condition in which failover
7014  * was lost if the fabric connect command timed out while the controller was
7015  * being reset.
7016  */
7017 static void
7018 test_race_between_reset_and_disconnected(void)
7019 {
7020 	struct spdk_nvme_transport_id trid = {};
7021 	struct spdk_nvme_ctrlr ctrlr = {};
7022 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7023 	struct nvme_path_id *curr_trid;
7024 	struct spdk_io_channel *ch1, *ch2;
7025 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7026 	int rc;
7027 
7028 	ut_init_trid(&trid);
7029 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7030 
7031 	set_thread(0);
7032 
7033 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7034 	CU_ASSERT(rc == 0);
7035 
7036 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7037 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7038 
7039 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7040 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7041 
7042 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7043 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7044 
7045 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7046 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7047 
7048 	set_thread(1);
7049 
7050 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7051 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7052 
7053 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7054 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7055 
7056 	/* Reset starts from thread 1. */
7057 	set_thread(1);
7058 
7059 	nvme_ctrlr->resetting = false;
7060 	curr_trid->last_failed_tsc = spdk_get_ticks();
7061 	ctrlr.is_failed = true;
7062 
7063 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7064 	CU_ASSERT(rc == 0);
7065 	CU_ASSERT(nvme_ctrlr->resetting == true);
7066 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7067 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7068 
7069 	poll_thread_times(0, 3);
7070 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7071 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7072 
7073 	poll_thread_times(0, 1);
7074 	poll_thread_times(1, 1);
7075 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7076 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7077 	CU_ASSERT(ctrlr.is_failed == true);
7078 
7079 	poll_thread_times(1, 1);
7080 	poll_thread_times(0, 1);
7081 	CU_ASSERT(ctrlr.is_failed == false);
7082 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7083 
7084 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7085 	poll_thread_times(0, 2);
7086 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7087 
7088 	poll_thread_times(0, 1);
7089 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7090 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7091 
7092 	poll_thread_times(1, 1);
7093 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7094 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7095 	CU_ASSERT(nvme_ctrlr->resetting == true);
7096 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
7097 
7098 	poll_thread_times(0, 2);
7099 	CU_ASSERT(nvme_ctrlr->resetting == true);
7100 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7101 	poll_thread_times(1, 1);
7102 	CU_ASSERT(nvme_ctrlr->resetting == true);
7103 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7104 
7105 	/* Only one poll remains here before _bdev_nvme_reset_complete() executes.
7106 	 *
7107 	 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric
7108 	 * connect command is executed. If the fabric connect command times out,
7109 	 * bdev_nvme_failover_ctrlr() is called. That failover must be deferred
7110 	 * until _bdev_nvme_reset_complete() sets ctrlr->resetting to false.
7111 	 *
7112 	 * Simulate the fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
7113 	 */
7114 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
7115 	CU_ASSERT(rc == -EINPROGRESS);
7116 	CU_ASSERT(nvme_ctrlr->resetting == true);
7117 	CU_ASSERT(nvme_ctrlr->pending_failover == true);
7118 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7119 
7120 	poll_thread_times(0, 1);
7121 
7122 	CU_ASSERT(nvme_ctrlr->resetting == true);
7123 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7124 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
7125 
7126 	poll_threads();
7127 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7128 	poll_threads();
7129 
7130 	CU_ASSERT(nvme_ctrlr->resetting == false);
7131 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7132 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7133 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7134 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7135 
7136 	spdk_put_io_channel(ch2);
7137 
7138 	set_thread(0);
7139 
7140 	spdk_put_io_channel(ch1);
7141 
7142 	poll_threads();
7143 
7144 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7145 	CU_ASSERT(rc == 0);
7146 
7147 	poll_threads();
7148 	spdk_delay_us(1000);
7149 	poll_threads();
7150 
7151 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7152 }
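
/* A minimal sketch of the deferral verified above: a failover request that
 * arrives while a reset is in flight must not start a second reset; it is
 * recorded in pending_failover and replayed once the current reset completes.
 * Illustrative pseudo-logic only; the real bdev_nvme state machine handles
 * more cases.
 */
#if 0
static int
ut_failover_or_defer(struct nvme_ctrlr *nvme_ctrlr)
{
	if (nvme_ctrlr->resetting) {
		/* Reset in progress: remember the request instead of acting now. */
		nvme_ctrlr->pending_failover = true;
		return -EINPROGRESS;
	}

	/* Otherwise rotate to the next trid and start a reset immediately. */
	return bdev_nvme_reset_ctrlr(nvme_ctrlr);
}
#endif
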
7153 static void
7154 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
7155 {
7156 	int *_rc = (int *)cb_arg;
7157 
7158 	SPDK_CU_ASSERT_FATAL(_rc != NULL);
7159 	*_rc = rc;
7160 }
7161 
7162 static void
7163 test_ctrlr_op_rpc(void)
7164 {
7165 	struct spdk_nvme_transport_id trid = {};
7166 	struct spdk_nvme_ctrlr ctrlr = {};
7167 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7168 	struct nvme_path_id *curr_trid;
7169 	struct spdk_io_channel *ch1, *ch2;
7170 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7171 	int ctrlr_op_rc;
7172 	int rc;
7173 
7174 	ut_init_trid(&trid);
7175 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7176 
7177 	set_thread(0);
7178 
7179 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7180 	CU_ASSERT(rc == 0);
7181 
7182 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7183 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7184 
7185 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7186 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7187 
7188 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7189 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7190 
7191 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7192 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7193 
7194 	set_thread(1);
7195 
7196 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7197 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7198 
7199 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7200 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7201 
7202 	/* Reset starts from thread 1. */
7203 	set_thread(1);
7204 
7205 	/* Case 1: ctrlr is already being destructed. */
7206 	nvme_ctrlr->destruct = true;
7207 	ctrlr_op_rc = 0;
7208 
7209 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7210 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7211 
7212 	poll_threads();
7213 
7214 	CU_ASSERT(ctrlr_op_rc == -ENXIO);
7215 
7216 	/* Case 2: reset is in progress. */
7217 	nvme_ctrlr->destruct = false;
7218 	nvme_ctrlr->resetting = true;
7219 	ctrlr_op_rc = 0;
7220 
7221 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7222 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7223 
7224 	poll_threads();
7225 
7226 	CU_ASSERT(ctrlr_op_rc == -EBUSY);
7227 
7228 	/* Case 3: reset completes successfully. */
7229 	nvme_ctrlr->resetting = false;
7230 	curr_trid->last_failed_tsc = spdk_get_ticks();
7231 	ctrlr.is_failed = true;
7232 	ctrlr_op_rc = -1;
7233 
7234 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7235 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7236 
7237 	CU_ASSERT(nvme_ctrlr->resetting == true);
7238 	CU_ASSERT(ctrlr_op_rc == -1);
7239 
7240 	poll_threads();
7241 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7242 	poll_threads();
7243 
7244 	CU_ASSERT(nvme_ctrlr->resetting == false);
7245 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7246 	CU_ASSERT(ctrlr.is_failed == false);
7247 	CU_ASSERT(ctrlr_op_rc == 0);
7248 
7249 	/* Case 4: invalid operation. */
7250 	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
7251 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7252 
7253 	poll_threads();
7254 
7255 	CU_ASSERT(ctrlr_op_rc == -EINVAL);
7256 
7257 	spdk_put_io_channel(ch2);
7258 
7259 	set_thread(0);
7260 
7261 	spdk_put_io_channel(ch1);
7262 
7263 	poll_threads();
7264 
7265 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7266 	CU_ASSERT(rc == 0);
7267 
7268 	poll_threads();
7269 	spdk_delay_us(1000);
7270 	poll_threads();
7271 
7272 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7273 }
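
/* Cases 1, 2, and 4 above pin down the error contract of nvme_ctrlr_op_rpc().
 * A compact sketch of those guards; the ordering of the checks in bdev_nvme.c
 * may differ, and the result is always delivered through the callback.
 */
#if 0
static int
ut_ctrlr_op_precheck(struct nvme_ctrlr *nvme_ctrlr, int op)
{
	if (nvme_ctrlr->destruct) {
		return -ENXIO;	/* Case 1: ctrlr is being destructed. */
	}
	if (nvme_ctrlr->resetting) {
		return -EBUSY;	/* Case 2: a reset is already in progress. */
	}
	if (op != NVME_CTRLR_OP_RESET) {
		return -EINVAL;	/* Case 4: unknown operation. */
	}

	return 0;		/* Case 3: the reset may proceed. */
}
#endif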
7274 
7275 static void
7276 test_bdev_ctrlr_op_rpc(void)
7277 {
7278 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
7279 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
7280 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7281 	struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL;
7282 	struct nvme_path_id *curr_trid1, *curr_trid2;
7283 	struct spdk_io_channel *ch11, *ch12, *ch21, *ch22;
7284 	struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22;
7285 	int ctrlr_op_rc;
7286 	int rc;
7287 
7288 	ut_init_trid(&trid1);
7289 	ut_init_trid2(&trid2);
7290 	TAILQ_INIT(&ctrlr1.active_io_qpairs);
7291 	TAILQ_INIT(&ctrlr2.active_io_qpairs);
7292 	ctrlr1.cdata.cmic.multi_ctrlr = 1;
7293 	ctrlr2.cdata.cmic.multi_ctrlr = 1;
7294 	ctrlr1.cdata.cntlid = 1;
7295 	ctrlr2.cdata.cntlid = 2;
7296 	ctrlr1.adminq.is_connected = true;
7297 	ctrlr2.adminq.is_connected = true;
7298 
7299 	set_thread(0);
7300 
7301 	rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL);
7302 	CU_ASSERT(rc == 0);
7303 
7304 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7305 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7306 
7307 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN);
7308 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
7309 
7310 	curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
7311 	SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL);
7312 
7313 	ch11 = spdk_get_io_channel(nvme_ctrlr1);
7314 	SPDK_CU_ASSERT_FATAL(ch11 != NULL);
7315 
7316 	ctrlr_ch11 = spdk_io_channel_get_ctx(ch11);
7317 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7318 
7319 	set_thread(1);
7320 
7321 	ch12 = spdk_get_io_channel(nvme_ctrlr1);
7322 	SPDK_CU_ASSERT_FATAL(ch12 != NULL);
7323 
7324 	ctrlr_ch12 = spdk_io_channel_get_ctx(ch12);
7325 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7326 
7327 	set_thread(0);
7328 
7329 	rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL);
7330 	CU_ASSERT(rc == 0);
7331 
7332 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN);
7333 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
7334 
7335 	curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
7336 	SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL);
7337 
7338 	ch21 = spdk_get_io_channel(nvme_ctrlr2);
7339 	SPDK_CU_ASSERT_FATAL(ch21 != NULL);
7340 
7341 	ctrlr_ch21 = spdk_io_channel_get_ctx(ch21);
7342 	CU_ASSERT(ctrlr_ch21->qpair != NULL);
7343 
7344 	set_thread(1);
7345 
7346 	ch22 = spdk_get_io_channel(nvme_ctrlr2);
7347 	SPDK_CU_ASSERT_FATAL(ch22 != NULL);
7348 
7349 	ctrlr_ch22 = spdk_io_channel_get_ctx(ch22);
7350 	CU_ASSERT(ctrlr_ch22->qpair != NULL);
7351 
7352 	/* Reset starts from thread 1. */
7353 	set_thread(1);
7354 
7355 	nvme_ctrlr1->resetting = false;
7356 	nvme_ctrlr2->resetting = false;
7357 	curr_trid1->last_failed_tsc = spdk_get_ticks();
7358 	curr_trid2->last_failed_tsc = spdk_get_ticks();
7359 	ctrlr_op_rc = -1;
7360 
7361 	nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET,
7362 			       ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7363 
7364 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7365 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7366 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7367 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7368 
7369 	poll_thread_times(0, 3);
7370 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7371 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7372 
7373 	poll_thread_times(0, 1);
7374 	poll_thread_times(1, 1);
7375 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7376 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7377 
7378 	poll_thread_times(1, 1);
7379 	poll_thread_times(0, 1);
7380 	CU_ASSERT(ctrlr1.adminq.is_connected == false);
7381 
7382 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7383 	poll_thread_times(0, 2);
7384 	CU_ASSERT(ctrlr1.adminq.is_connected == true);
7385 
7386 	poll_thread_times(0, 1);
7387 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7388 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7389 
7390 	poll_thread_times(1, 1);
7391 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7392 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7393 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7394 	CU_ASSERT(curr_trid1->last_failed_tsc != 0);
7395 
7396 	poll_thread_times(0, 2);
7397 	poll_thread_times(1, 1);
7398 	poll_thread_times(0, 1);
7399 	poll_thread_times(1, 1);
7400 	poll_thread_times(0, 1);
7401 	poll_thread_times(1, 1);
7402 	poll_thread_times(0, 1);
7403 
7404 	CU_ASSERT(nvme_ctrlr1->resetting == false);
7405 	CU_ASSERT(curr_trid1->last_failed_tsc == 0);
7406 	CU_ASSERT(nvme_ctrlr2->resetting == true);
7407 
7408 	poll_threads();
7409 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7410 	poll_threads();
7411 
7412 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7413 	CU_ASSERT(ctrlr_op_rc == 0);
7414 
7415 	set_thread(1);
7416 
7417 	spdk_put_io_channel(ch12);
7418 	spdk_put_io_channel(ch22);
7419 
7420 	set_thread(0);
7421 
7422 	spdk_put_io_channel(ch11);
7423 	spdk_put_io_channel(ch21);
7424 
7425 	poll_threads();
7426 
7427 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7428 	CU_ASSERT(rc == 0);
7429 
7430 	poll_threads();
7431 	spdk_delay_us(1000);
7432 	poll_threads();
7433 
7434 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7435 }
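
/* What the poll sequence above demonstrates: nvme_bdev_ctrlr_op_rpc() handles
 * the ctrlrs one at a time, starting the next reset only after the previous one
 * finishes, and reports a single result at the end. A minimal sketch of that
 * chaining, where ut_op_done() is a hypothetical per-ctrlr completion hook and
 * the tailq field names are assumptions.
 */
#if 0
static void
ut_op_next_ctrlr(struct nvme_bdev_ctrlr *nbdev_ctrlr, struct nvme_ctrlr *prev)
{
	struct nvme_ctrlr *next;

	/* Field names below (ctrlrs, tailq) are assumed for illustration. */
	next = (prev == NULL) ? TAILQ_FIRST(&nbdev_ctrlr->ctrlrs)
	       : TAILQ_NEXT(prev, tailq);
	if (next == NULL) {
		/* All ctrlrs processed; deliver the caller's single callback here. */
		return;
	}

	nvme_ctrlr_op_rpc(next, NVME_CTRLR_OP_RESET, ut_op_done, next);
}
#endif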
7436 
7437 static void
7438 test_disable_enable_ctrlr(void)
7439 {
7440 	struct spdk_nvme_transport_id trid = {};
7441 	struct spdk_nvme_ctrlr ctrlr = {};
7442 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7443 	struct nvme_path_id *curr_trid;
7444 	struct spdk_io_channel *ch1, *ch2;
7445 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7446 	int rc;
7447 
7448 	ut_init_trid(&trid);
7449 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7450 	ctrlr.adminq.is_connected = true;
7451 
7452 	set_thread(0);
7453 
7454 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7455 	CU_ASSERT(rc == 0);
7456 
7457 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7458 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7459 
7460 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7461 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7462 
7463 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7464 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7465 
7466 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7467 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7468 
7469 	set_thread(1);
7470 
7471 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7472 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7473 
7474 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7475 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7476 
7477 	/* Disable starts from thread 1. */
7478 	set_thread(1);
7479 
7480 	/* Case 1: ctrlr is already disabled. */
7481 	nvme_ctrlr->disabled = true;
7482 
7483 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7484 	CU_ASSERT(rc == -EALREADY);
7485 
7486 	/* Case 2: ctrlr is already being destructed. */
7487 	nvme_ctrlr->disabled = false;
7488 	nvme_ctrlr->destruct = true;
7489 
7490 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7491 	CU_ASSERT(rc == -ENXIO);
7492 
7493 	/* Case 3: reset is in progress. */
7494 	nvme_ctrlr->destruct = false;
7495 	nvme_ctrlr->resetting = true;
7496 
7497 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7498 	CU_ASSERT(rc == -EBUSY);
7499 
7500 	/* Case 4: disable completes successfully. */
7501 	nvme_ctrlr->resetting = false;
7502 
7503 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7504 	CU_ASSERT(rc == 0);
7505 	CU_ASSERT(nvme_ctrlr->resetting == true);
7506 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7507 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7508 
7509 	poll_thread_times(0, 3);
7510 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7511 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7512 
7513 	poll_thread_times(0, 1);
7514 	poll_thread_times(1, 1);
7515 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7516 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7517 
7518 	poll_thread_times(1, 1);
7519 	poll_thread_times(0, 1);
7520 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7521 	poll_thread_times(1, 1);
7522 	poll_thread_times(0, 1);
7523 	poll_thread_times(1, 1);
7524 	poll_thread_times(0, 1);
7525 	CU_ASSERT(nvme_ctrlr->resetting == false);
7526 	CU_ASSERT(nvme_ctrlr->disabled == true);
7527 
7528 	/* Case 5: enable completes successfully. */
7529 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7530 	CU_ASSERT(rc == 0);
7531 
7532 	CU_ASSERT(nvme_ctrlr->resetting == true);
7533 	CU_ASSERT(nvme_ctrlr->disabled == false);
7534 
7535 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7536 	poll_thread_times(0, 2);
7537 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7538 
7539 	poll_thread_times(0, 1);
7540 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7541 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7542 
7543 	poll_thread_times(1, 1);
7544 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7545 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7546 	CU_ASSERT(nvme_ctrlr->resetting == true);
7547 
7548 	poll_thread_times(0, 2);
7549 	CU_ASSERT(nvme_ctrlr->resetting == true);
7550 	poll_thread_times(1, 1);
7551 	CU_ASSERT(nvme_ctrlr->resetting == true);
7552 	poll_thread_times(0, 1);
7553 	CU_ASSERT(nvme_ctrlr->resetting == false);
7554 
7555 	/* Case 6: ctrlr is already enabled. */
7556 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7557 	CU_ASSERT(rc == -EALREADY);
7558 
7559 	set_thread(0);
7560 
7561 	/* Case 7: disable cancels delayed reconnect. */
7562 	nvme_ctrlr->opts.reconnect_delay_sec = 10;
7563 	ctrlr.fail_reset = true;
7564 
7565 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7566 	CU_ASSERT(rc == 0);
7567 
7568 	poll_threads();
7569 
7570 	CU_ASSERT(nvme_ctrlr->resetting == false);
7571 	CU_ASSERT(ctrlr.is_failed == false);
7572 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7573 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7574 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
7575 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
7576 
7577 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7578 	CU_ASSERT(rc == 0);
7579 
7580 	CU_ASSERT(nvme_ctrlr->resetting == true);
7581 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
7582 
7583 	poll_threads();
7584 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7585 	poll_threads();
7586 
7587 	CU_ASSERT(nvme_ctrlr->resetting == false);
7588 	CU_ASSERT(nvme_ctrlr->disabled == true);
7589 
7590 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7591 	CU_ASSERT(rc == 0);
7592 
7593 	CU_ASSERT(nvme_ctrlr->resetting == true);
7594 	CU_ASSERT(nvme_ctrlr->disabled == false);
7595 
7596 	poll_threads();
7597 
7598 	CU_ASSERT(nvme_ctrlr->resetting == false);
7599 
7600 	set_thread(1);
7601 
7602 	spdk_put_io_channel(ch2);
7603 
7604 	set_thread(0);
7605 
7606 	spdk_put_io_channel(ch1);
7607 
7608 	poll_threads();
7609 
7610 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7611 	CU_ASSERT(rc == 0);
7612 
7613 	poll_threads();
7614 	spdk_delay_us(1000);
7615 	poll_threads();
7616 
7617 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7618 }
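
/* Cases 1-3 above fix the guard conditions for bdev_nvme_disable_ctrlr();
 * bdev_nvme_enable_ctrlr() is symmetric, returning -EALREADY when the ctrlr is
 * not disabled. A minimal sketch of the disable-side guards (illustrative; see
 * bdev_nvme.c for the authoritative checks):
 */
#if 0
static int
ut_disable_precheck(struct nvme_ctrlr *nvme_ctrlr)
{
	if (nvme_ctrlr->disabled) {
		return -EALREADY;	/* Case 1: already disabled. */
	}
	if (nvme_ctrlr->destruct) {
		return -ENXIO;		/* Case 2: being destructed. */
	}
	if (nvme_ctrlr->resetting) {
		return -EBUSY;		/* Case 3: reset in progress. */
	}

	return 0;			/* Case 4: disable may proceed. */
}
#endif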
7619 
7620 static void
7621 ut_delete_done(void *ctx, int rc)
7622 {
7623 	int *delete_done_rc = ctx;
7624 	*delete_done_rc = rc;
7625 }
7626 
7627 static void
7628 test_delete_ctrlr_done(void)
7629 {
7630 	struct spdk_nvme_transport_id trid = {};
7631 	struct spdk_nvme_ctrlr ctrlr = {};
7632 	int delete_done_rc = 0xDEADBEEF;
7633 	int rc;
7634 
7635 	ut_init_trid(&trid);
7636 
7637 	nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7638 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
7639 
7640 	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
7641 	CU_ASSERT(rc == 0);
7642 
7643 	for (int i = 0; i < 20; i++) {
7644 		poll_threads();
7645 		if (delete_done_rc == 0) {
7646 			break;
7647 		}
7648 		spdk_delay_us(1000);
7649 	}
7650 
7651 	CU_ASSERT(delete_done_rc == 0);
7652 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7653 }
7654 
7655 static void
7656 test_ns_remove_during_reset(void)
7657 {
7658 	struct nvme_path_id path = {};
7659 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
7660 	struct spdk_nvme_ctrlr *ctrlr;
7661 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7662 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7663 	struct nvme_ctrlr *nvme_ctrlr;
7664 	const int STRING_SIZE = 32;
7665 	const char *attached_names[STRING_SIZE];
7666 	struct nvme_bdev *bdev;
7667 	struct nvme_ns *nvme_ns;
7668 	union spdk_nvme_async_event_completion event = {};
7669 	struct spdk_nvme_cpl cpl = {};
7670 	int rc;
7671 
7672 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
7673 	ut_init_trid(&path.trid);
7674 
7675 	set_thread(0);
7676 
7677 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
7678 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7679 
7680 	g_ut_attach_ctrlr_status = 0;
7681 	g_ut_attach_bdev_count = 1;
7682 
7683 	opts.multipath = false;
7684 
7685 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
7686 				   attach_ctrlr_done, NULL, &dopts, &opts);
7687 	CU_ASSERT(rc == 0);
7688 
7689 	spdk_delay_us(1000);
7690 	poll_threads();
7691 
7692 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7693 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7694 
7695 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
7696 	CU_ASSERT(nvme_ctrlr != NULL);
7697 
7698 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
7699 	CU_ASSERT(bdev != NULL);
7700 
7701 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
7702 	CU_ASSERT(nvme_ns != NULL);
7703 
7704 	/* If the ns is removed during a ctrlr reset, the nvme_ns and bdev should
7705 	 * still exist, but nvme_ns->ns should be NULL.
7706 	 */
7707 
7708 	CU_ASSERT(ctrlr->ns[0].is_active == true);
7709 	ctrlr->ns[0].is_active = false;
7710 
7711 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7712 	CU_ASSERT(rc == 0);
7713 
7714 	poll_threads();
7715 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7716 	poll_threads();
7717 
7718 	CU_ASSERT(nvme_ctrlr->resetting == false);
7719 	CU_ASSERT(ctrlr->adminq.is_connected == true);
7720 
7721 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7722 	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7723 	CU_ASSERT(nvme_ns->bdev == bdev);
7724 	CU_ASSERT(nvme_ns->ns == NULL);
7725 
7726 	/* Then, an async event should populate nvme_ns->ns again. */
7727 
7728 	ctrlr->ns[0].is_active = true;
7729 
7730 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
7731 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
7732 	cpl.cdw0 = event.raw;
7733 
7734 	aer_cb(nvme_ctrlr, &cpl);
7735 
7736 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7737 	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7738 	CU_ASSERT(nvme_ns->bdev == bdev);
7739 	CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]);
7740 
7741 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7742 	CU_ASSERT(rc == 0);
7743 
7744 	poll_threads();
7745 	spdk_delay_us(1000);
7746 	poll_threads();
7747 
7748 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7749 }
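
/* The aer_cb() call above hand-crafts the completion a controller would post
 * for a namespace-attribute-changed notice: the async event bits are packed
 * into cdw0 of an otherwise empty completion. A minimal sketch of that packing,
 * mirroring the test code:
 */
#if 0
static void
ut_build_ns_attr_changed_cpl(struct spdk_nvme_cpl *cpl)
{
	union spdk_nvme_async_event_completion event = {};

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;

	memset(cpl, 0, sizeof(*cpl));
	cpl->cdw0 = event.raw;	/* The AER payload travels in cdw0. */
}
#endif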
7750 
7751 static void
7752 test_io_path_is_current(void)
7753 {
7754 	struct nvme_bdev_channel nbdev_ch = {
7755 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
7756 	};
7757 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
7758 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
7759 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
7760 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, },
7761 	nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
7762 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {};
7763 	struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
7764 	struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
7765 	struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, };
7766 	struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, };
7767 	struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, };
7768 	struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, };
7769 	struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
7770 	struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
7771 	struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
7772 
7773 	/* io_path1 is being deleted. */
7774 	io_path1.nbdev_ch = NULL;
7775 
7776 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == false);
7777 
7778 	io_path1.nbdev_ch = &nbdev_ch;
7779 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
7780 	io_path2.nbdev_ch = &nbdev_ch;
7781 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
7782 	io_path3.nbdev_ch = &nbdev_ch;
7783 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
7784 
7785 	/* active/active: io_path is current if it is available and ANA optimized. */
7786 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7787 
7788 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7789 
7790 	/* active/active: io_path is not current if it is disconnected even if it is
7791 	 * ANA optimized.
7792 	 */
7793 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7794 
7795 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7796 
7797 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7798 
7799 	/* active/passive: io_path is current if it is available and cached.
7800 	 * (Only an ANA-optimized path is cached for active/passive.)
7801 	 */
7802 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7803 	nbdev_ch.current_io_path = &io_path2;
7804 
7805 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7806 
7807 	/* active/passive: io_path is not current if it is disconnected, even if it is cached. */
7808 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7809 
7810 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7811 
7812 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7813 
7814 	/* active/active and active/passive: io_path is not current if it is ANA inaccessible. */
7815 	nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
7816 
7817 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7818 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7819 
7820 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7821 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7822 
7823 	/* active/active: non-optimized path is current only if there is no optimized path. */
7824 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7825 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7826 
7827 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7828 
7829 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7830 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7831 
7832 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7833 
7834 	/* active/passive: when there is no optimized path, the first available path is current. */
7835 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7836 	nbdev_ch.current_io_path = NULL;
7837 
7838 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == true);
7839 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7840 	CU_ASSERT(nvme_io_path_is_current(&io_path3) == false);
7841 }
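
/* A condensed summary of the decision tree exercised above (descriptive only;
 * nvme_io_path_is_current() in bdev_nvme.c is the authority):
 *
 *   - A path is never current if it is being deleted (nbdev_ch == NULL), its
 *     qpair is disconnected, or its namespace is ANA inaccessible.
 *   - active/passive: a path is current if it is the cached current_io_path,
 *     or the first available path when nothing is cached.
 *   - active/active: an optimized path is current; a non-optimized path is
 *     current only when no optimized path exists.
 */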
7842 
7843 static void
7844 test_bdev_reset_abort_io(void)
7845 {
7846 	struct spdk_nvme_transport_id trid = {};
7847 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
7848 	struct spdk_nvme_ctrlr *ctrlr;
7849 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7850 	struct nvme_ctrlr *nvme_ctrlr;
7851 	const int STRING_SIZE = 32;
7852 	const char *attached_names[STRING_SIZE];
7853 	struct nvme_bdev *bdev;
7854 	struct spdk_bdev_io *write_io, *read_io, *reset_io;
7855 	struct spdk_io_channel *ch1, *ch2;
7856 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
7857 	struct nvme_io_path *io_path1, *io_path2;
7858 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
7859 	int rc;
7860 
7861 	g_opts.bdev_retry_count = -1;
7862 
7863 	ut_init_trid(&trid);
7864 
7865 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
7866 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7867 
7868 	g_ut_attach_ctrlr_status = 0;
7869 	g_ut_attach_bdev_count = 1;
7870 
7871 	set_thread(1);
7872 
7873 	opts.ctrlr_loss_timeout_sec = -1;
7874 	opts.reconnect_delay_sec = 1;
7875 	opts.multipath = false;
7876 
7877 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
7878 				   attach_ctrlr_done, NULL, &dopts, &opts);
7879 	CU_ASSERT(rc == 0);
7880 
7881 	spdk_delay_us(1000);
7882 	poll_threads();
7883 
7884 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7885 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7886 
7887 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
7888 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
7889 
7890 	set_thread(0);
7891 
7892 	ch1 = spdk_get_io_channel(bdev);
7893 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7894 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
7895 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
7896 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
7897 	nvme_qpair1 = io_path1->qpair;
7898 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
7899 
7900 	set_thread(1);
7901 
7902 	ch2 = spdk_get_io_channel(bdev);
7903 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7904 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
7905 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
7906 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
7907 	nvme_qpair2 = io_path2->qpair;
7908 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
7909 
7910 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch1);
7911 	ut_bdev_io_set_buf(write_io);
7912 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
7913 
7914 	read_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_READ, bdev, ch1);
7915 	ut_bdev_io_set_buf(read_io);
7916 	read_io->internal.ch = (struct spdk_bdev_channel *)ch1;
7917 
7918 	reset_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
7919 
7920 	/* If a qpair is disconnected, it is freed and then recreated by resetting
7921 	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is
7922 	 * resetting should be queued.
7923 	 */
7924 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7925 
7926 	poll_thread_times(0, 3);
7927 
7928 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7929 	CU_ASSERT(nvme_ctrlr->resetting == true);
7930 
7931 	set_thread(0);
7932 
7933 	write_io->internal.f.in_submit_request = true;
7934 
7935 	bdev_nvme_submit_request(ch1, write_io);
7936 
7937 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
7938 	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
7939 
7940 	set_thread(1);
7941 
7942 	/* Submit a reset request to a bdev while an nvme_ctrlr is resetting.
7943 	 * Further I/O queueing should be disabled and queued I/Os should be aborted.
7944 	 * Verify these behaviors.
7945 	 */
7946 	reset_io->internal.f.in_submit_request = true;
7947 
7948 	bdev_nvme_submit_request(ch2, reset_io);
7949 
7950 	poll_thread_times(0, 1);
7951 	poll_thread_times(1, 2);
7952 
7953 	CU_ASSERT(nbdev_ch1->resetting == true);
7954 
7955 	/* qpair1 should still be disconnected. */
7956 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7957 
7958 	set_thread(0);
7959 
7960 	read_io->internal.f.in_submit_request = true;
7961 
7962 	bdev_nvme_submit_request(ch1, read_io);
7963 
7964 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7965 
7966 	poll_thread_times(0, 1);
7967 
7968 	/* The I/O submitted during the bdev reset should fail immediately. */
7969 	CU_ASSERT(read_io->internal.f.in_submit_request == false);
7970 	CU_ASSERT(read_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
7971 
7972 	poll_threads();
7973 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7974 	poll_threads();
7975 
7976 	/* The completion of bdev_reset should ensure queued I/O is aborted. */
7977 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
7978 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
7979 
7980 	/* The reset request itself should complete with success. */
7981 	CU_ASSERT(reset_io->internal.f.in_submit_request == false);
7982 	CU_ASSERT(reset_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
7983 
7984 	set_thread(0);
7985 
7986 	spdk_put_io_channel(ch1);
7987 
7988 	set_thread(1);
7989 
7990 	spdk_put_io_channel(ch2);
7991 
7992 	poll_threads();
7993 
7994 	set_thread(0);
7995 
7996 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7997 	CU_ASSERT(rc == 0);
7998 
7999 	poll_threads();
8000 	spdk_delay_us(1000);
8001 	poll_threads();
8002 
8003 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
8004 
8005 	free(write_io);
8006 	free(read_io);
8007 	free(reset_io);
8008 
8009 	g_opts.bdev_retry_count = 0;
8010 }
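
/* The ordering verified above: once a bdev-level reset begins, the channel
 * stops queueing retries and any already-queued I/O is completed as ABORTED
 * before the reset itself completes. A minimal sketch of the abort step,
 * assuming retry_io_list links struct nvme_bdev_io through a retry_link entry,
 * as the asserts above suggest:
 */
#if 0
static void
ut_abort_queued_ios(struct nvme_bdev_channel *nbdev_ch)
{
	struct nvme_bdev_io *bio;

	while ((bio = TAILQ_FIRST(&nbdev_ch->retry_io_list)) != NULL) {
		/* The retry_link field name is an assumption for illustration. */
		TAILQ_REMOVE(&nbdev_ch->retry_io_list, bio, retry_link);
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bio),
				      SPDK_BDEV_IO_STATUS_ABORTED);
	}
}
#endif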
8011 
8012 int
8013 main(int argc, char **argv)
8014 {
8015 	CU_pSuite	suite = NULL;
8016 	unsigned int	num_failures;
8017 
8018 	CU_initialize_registry();
8019 
8020 	suite = CU_add_suite("nvme", NULL, NULL);
8021 
8022 	CU_ADD_TEST(suite, test_create_ctrlr);
8023 	CU_ADD_TEST(suite, test_reset_ctrlr);
8024 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
8025 	CU_ADD_TEST(suite, test_failover_ctrlr);
8026 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
8027 	CU_ADD_TEST(suite, test_pending_reset);
8028 	CU_ADD_TEST(suite, test_attach_ctrlr);
8029 	CU_ADD_TEST(suite, test_aer_cb);
8030 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
8031 	CU_ADD_TEST(suite, test_add_remove_trid);
8032 	CU_ADD_TEST(suite, test_abort);
8033 	CU_ADD_TEST(suite, test_get_io_qpair);
8034 	CU_ADD_TEST(suite, test_bdev_unregister);
8035 	CU_ADD_TEST(suite, test_compare_ns);
8036 	CU_ADD_TEST(suite, test_init_ana_log_page);
8037 	CU_ADD_TEST(suite, test_get_memory_domains);
8038 	CU_ADD_TEST(suite, test_reconnect_qpair);
8039 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
8040 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
8041 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
8042 	CU_ADD_TEST(suite, test_admin_path);
8043 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
8044 	CU_ADD_TEST(suite, test_find_io_path);
8045 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
8046 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
8047 	CU_ADD_TEST(suite, test_retry_io_count);
8048 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
8049 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
8050 	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
8051 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
8052 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
8053 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
8054 	CU_ADD_TEST(suite, test_fail_path);
8055 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
8056 	CU_ADD_TEST(suite, test_ana_transition);
8057 	CU_ADD_TEST(suite, test_set_preferred_path);
8058 	CU_ADD_TEST(suite, test_find_next_io_path);
8059 	CU_ADD_TEST(suite, test_find_io_path_min_qd);
8060 	CU_ADD_TEST(suite, test_disable_auto_failback);
8061 	CU_ADD_TEST(suite, test_set_multipath_policy);
8062 	CU_ADD_TEST(suite, test_uuid_generation);
8063 	CU_ADD_TEST(suite, test_retry_io_to_same_path);
8064 	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
8065 	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
8066 	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
8067 	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
8068 	CU_ADD_TEST(suite, test_delete_ctrlr_done);
8069 	CU_ADD_TEST(suite, test_ns_remove_during_reset);
8070 	CU_ADD_TEST(suite, test_io_path_is_current);
8071 	CU_ADD_TEST(suite, test_bdev_reset_abort_io);
8072 
8073 	allocate_threads(3);
8074 	set_thread(0);
8075 	bdev_nvme_library_init();
8076 	init_accel();
8077 
8078 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
8079 
8080 	set_thread(0);
8081 	bdev_nvme_library_fini();
8082 	fini_accel();
8083 	free_threads();
8084 
8085 	CU_cleanup_registry();
8086 
8087 	return num_failures;
8088 }
8089