xref: /spdk/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c (revision a4931da069292ccd27a936edd1919742eecf29b5)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2021 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 #include "spdk_internal/cunit.h"
9 #include "spdk/thread.h"
10 #include "spdk/bdev_module.h"
11 
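/* The code under test (bdev_nvme.c and bdev_mdns_client.c) and the shared test
 * scaffolding are #included below as .c files rather than linked, so that
 * file-static functions and data in the implementation are directly visible
 * to the test cases in this file.
 */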
12 #include "common/lib/ut_multithread.c"
13 
14 #include "bdev/nvme/bdev_nvme.c"
15 
16 #include "unit/lib/json_mock.c"
17 
18 #include "bdev/nvme/bdev_mdns_client.c"
19 
20 static void *g_accel_p = (void *)0xdeadbeaf;
21 
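/* The DEFINE_STUB()/DEFINE_STUB_V() macros below come from spdk_internal/mock.h.
 * As a rough sketch (not the exact expansion), DEFINE_STUB(fn, ret, args, val)
 * emits a definition of fn() that returns a per-function mock value initialized
 * to val, which a test may override at runtime, e.g. with MOCK_SET(fn, new_val):
 *
 *   ret fn args { return <mock value for fn>; }
 *
 * DEFINE_RETURN_MOCK() only declares the mock value, for use by a hand-written
 * function body via HANDLE_RETURN_MOCK(), as done for
 * spdk_nvme_ctrlr_get_memory_domains() below.
 */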
22 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
23 	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
24 	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
25 	     spdk_nvme_remove_cb remove_cb), NULL);
26 
27 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
28 		enum spdk_nvme_transport_type trtype));
29 
30 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
31 	    NULL);
32 
33 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
34 
35 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
36 		struct spdk_nvme_transport_id *trid), 0);
37 
38 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
39 		spdk_nvme_remove_cb remove_cb, void *remove_ctx));
40 
41 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
42 DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);
43 DEFINE_STUB(spdk_nvme_ctrlr_get_numa_id, int32_t, (struct spdk_nvme_ctrlr *ctrlr),
44 	    SPDK_ENV_NUMA_ID_ANY);
45 
46 DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
47 DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
48 
49 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
50 	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);
51 
52 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);
53 
54 DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
55 		int error_code, const char *msg));
56 DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
57 	    (struct spdk_jsonrpc_request *request), NULL);
58 DEFINE_STUB_V(spdk_jsonrpc_end_result,
59 	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));
60 
61 DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
62 		size_t opts_size));
63 
64 DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
65 		size_t opts_size), 0);
66 DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL);
67 DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL);
68 
69 DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
70 
71 DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
72 					enum spdk_bdev_reset_stat_mode mode));
73 DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
74 				      struct spdk_bdev_io_stat *add));
75 
76 DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));
77 DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
78 DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
79 DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
80 DEFINE_STUB(spdk_nvme_scan_attached, int, (const struct spdk_nvme_transport_id *trid), 0);
81 
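/* Hand-written mock: when the return value armed via DEFINE_RETURN_MOCK() is
 * positive, fill the caller's array with that many dummy domain pointers
 * (capped at array_size) so tests can exercise the memory-domain paths, then
 * return the armed value through HANDLE_RETURN_MOCK().
 */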
82 int
83 spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
84 				   struct spdk_memory_domain **domains, int array_size)
85 {
86 	int i, min_array_size;
87 
88 	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
89 		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
90 		for (i = 0; i < min_array_size; i++) {
91 			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
92 		}
93 	}
94 	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);
95 
96 	return 0;
97 }
98 
99 struct spdk_io_channel *
100 spdk_accel_get_io_channel(void)
101 {
102 	return spdk_get_io_channel(g_accel_p);
103 }
104 
105 void
106 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
107 		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
108 {
109 	/* Avoid warning that opts is used uninitialised */
110 	memset(opts, 0, opts_size);
111 }
112 
113 #define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"
114 
115 static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN};
116 
117 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
118 	    (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts);
119 
120 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
121 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
122 
123 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
124 	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
125 
126 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
127 		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));
128 
129 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
130 		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));
131 
132 DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);
133 
134 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);
135 
136 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
137 		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
138 
139 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
140 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
141 		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
142 
143 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
144 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
145 		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
146 
147 DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
148 		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
149 		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
150 		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
151 		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
152 		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);
153 
154 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
155 		size_t *size), 0);
156 
157 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
158 
159 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
160 
161 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
162 
163 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);
164 
165 DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns),
166 	    SPDK_NVME_16B_GUARD_PI);
167 
168 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);
169 
170 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
171 
172 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
173 	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);
174 
175 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);
176 
177 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
178 		char *name, size_t *size), 0);
179 
180 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
181 	    (struct spdk_nvme_ns *ns), 0);
182 
183 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
184 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
185 
186 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
187 	    (struct spdk_nvme_ns *ns), 0);
188 
189 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
190 	    (struct spdk_nvme_ns *ns), 0);
191 
192 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
193 	    (struct spdk_nvme_ns *ns), 0);
194 
195 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
196 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
197 	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
198 	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);
199 
200 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
201 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
202 	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
203 	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
204 	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);
205 
206 DEFINE_STUB(spdk_nvme_zns_report_zones, int,
207 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
208 	     void *payload, uint32_t payload_size, uint64_t slba,
209 	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
210 	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
211 
212 DEFINE_STUB(spdk_nvme_zns_close_zone, int,
213 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
214 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
215 
216 DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
217 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
218 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
219 
220 DEFINE_STUB(spdk_nvme_zns_open_zone, int,
221 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
222 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
223 
224 DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
225 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
226 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
227 
228 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);
229 
230 DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
231 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
232 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
233 
234 DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
235 	    (const struct spdk_nvme_status *status), NULL);
236 
237 DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
238 	    (const struct spdk_nvme_status *status), NULL);
239 
240 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));
241 
242 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
243 
244 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
245 
246 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);
247 
248 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));
249 
250 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
251 		struct iovec *iov,
252 		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
253 DEFINE_STUB(spdk_accel_append_crc32c, int,
254 	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
255 	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
256 	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
257 DEFINE_STUB(spdk_accel_append_copy, int,
258 	    (struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
259 	     struct iovec *dst_iovs, uint32_t dst_iovcnt,
260 	     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
261 	     struct iovec *src_iovs, uint32_t src_iovcnt,
262 	     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
263 	     spdk_accel_step_cb cb_fn, void *cb_arg), 0);
264 DEFINE_STUB_V(spdk_accel_sequence_finish,
265 	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
266 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
267 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
268 DEFINE_STUB(spdk_nvme_qpair_authenticate, int,
269 	    (struct spdk_nvme_qpair *qpair, spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
270 DEFINE_STUB(spdk_nvme_ctrlr_authenticate, int,
271 	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
272 DEFINE_STUB(spdk_nvme_ctrlr_set_keys, int,
273 	    (struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ctrlr_key_opts *opts), 0);
274 
275 struct ut_nvme_req {
276 	uint16_t			opc;
277 	spdk_nvme_cmd_cb		cb_fn;
278 	void				*cb_arg;
279 	struct spdk_nvme_cpl		cpl;
280 	TAILQ_ENTRY(ut_nvme_req)	tailq;
281 };
282 
283 struct spdk_nvme_ns {
284 	struct spdk_nvme_ctrlr		*ctrlr;
285 	uint32_t			id;
286 	bool				is_active;
287 	struct spdk_uuid		*uuid;
288 	enum spdk_nvme_ana_state	ana_state;
289 	enum spdk_nvme_csi		csi;
290 };
291 
292 struct spdk_nvme_qpair {
293 	struct spdk_nvme_ctrlr		*ctrlr;
294 	uint8_t				failure_reason;
295 	bool				is_connected;
296 	bool				in_completion_context;
297 	bool				delete_after_completion_context;
298 	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
299 	uint32_t			num_outstanding_reqs;
300 	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
301 	struct spdk_nvme_poll_group	*poll_group;
302 	void				*poll_group_tailq_head;
303 	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
304 };
305 
306 struct spdk_nvme_ctrlr {
307 	uint32_t			num_ns;
308 	struct spdk_nvme_ns		*ns;
309 	struct spdk_nvme_ns_data	*nsdata;
310 	struct spdk_nvme_qpair		adminq;
311 	struct spdk_nvme_ctrlr_data	cdata;
312 	bool				attached;
313 	bool				is_failed;
314 	bool				fail_reset;
315 	bool				is_removed;
316 	struct spdk_nvme_transport_id	trid;
317 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
318 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
319 	struct spdk_nvme_ctrlr_opts	opts;
320 };
321 
322 struct spdk_nvme_poll_group {
323 	void				*ctx;
324 	struct spdk_nvme_accel_fn_table	accel_fn_table;
325 	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
326 	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
327 };
328 
329 struct spdk_nvme_probe_ctx {
330 	struct spdk_nvme_transport_id	trid;
331 	void				*cb_ctx;
332 	spdk_nvme_attach_cb		attach_cb;
333 	struct spdk_nvme_ctrlr		*init_ctrlr;
334 };
335 
336 uint32_t
337 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
338 {
339 	uint32_t nsid;
340 
341 	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
342 		if (ctrlr->ns[nsid - 1].is_active) {
343 			return nsid;
344 		}
345 	}
346 
347 	return 0;
348 }
349 
350 uint32_t
351 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
352 {
353 	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
354 		if (ctrlr->ns[nsid - 1].is_active) {
355 			return nsid;
356 		}
357 	}
358 
359 	return 0;
360 }
361 
362 uint32_t
363 spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
364 {
365 	return qpair->num_outstanding_reqs;
366 }
367 
368 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
369 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
370 			g_ut_attached_ctrlrs);
371 static int g_ut_attach_ctrlr_status;
372 static size_t g_ut_attach_bdev_count;
373 static int g_ut_register_bdev_status;
374 static struct spdk_bdev *g_ut_registered_bdev;
375 static uint16_t g_ut_cntlid;
376 static struct nvme_path_id g_any_path = {};
377 
378 static void
379 ut_init_trid(struct spdk_nvme_transport_id *trid)
380 {
381 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
382 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
383 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
384 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
385 }
386 
387 static void
388 ut_init_trid2(struct spdk_nvme_transport_id *trid)
389 {
390 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
391 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
392 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
393 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
394 }
395 
396 static void
397 ut_init_trid3(struct spdk_nvme_transport_id *trid)
398 {
399 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
400 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
401 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
402 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
403 }
404 
405 static int
406 cmp_int(int a, int b)
407 {
408 	return a - b;
409 }
410 
411 int
412 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
413 			       const struct spdk_nvme_transport_id *trid2)
414 {
415 	int cmp;
416 
417 	/* We assume trtype is TCP for now. */
418 	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);
419 
420 	cmp = cmp_int(trid1->trtype, trid2->trtype);
421 	if (cmp) {
422 		return cmp;
423 	}
424 
425 	cmp = strcasecmp(trid1->traddr, trid2->traddr);
426 	if (cmp) {
427 		return cmp;
428 	}
429 
430 	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
431 	if (cmp) {
432 		return cmp;
433 	}
434 
435 	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
436 	if (cmp) {
437 		return cmp;
438 	}
439 
440 	cmp = strcmp(trid1->subnqn, trid2->subnqn);
441 	if (cmp) {
442 		return cmp;
443 	}
444 
445 	return 0;
446 }
447 
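/* Fabricate a ctrlr with num_ns namespaces and queue it on g_ut_init_ctrlrs.
 * It stays there until spdk_nvme_probe_poll_async() (mocked below) is polled,
 * which moves it to g_ut_attached_ctrlrs and invokes the attach callback,
 * mimicking the driver's asynchronous connect flow. Returns NULL if a ctrlr
 * with the same trid is already pending.
 */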
448 static struct spdk_nvme_ctrlr *
449 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
450 		bool ana_reporting, bool multipath)
451 {
452 	struct spdk_nvme_ctrlr *ctrlr;
453 	uint32_t i;
454 
455 	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
456 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
457 			/* There is a ctrlr whose trid matches. */
458 			return NULL;
459 		}
460 	}
461 
462 	ctrlr = calloc(1, sizeof(*ctrlr));
463 	if (ctrlr == NULL) {
464 		return NULL;
465 	}
466 
467 	ctrlr->attached = true;
468 	ctrlr->adminq.ctrlr = ctrlr;
469 	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
470 	ctrlr->adminq.is_connected = true;
471 
472 	if (num_ns != 0) {
473 		ctrlr->num_ns = num_ns;
474 		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
475 		if (ctrlr->ns == NULL) {
476 			free(ctrlr);
477 			return NULL;
478 		}
479 
480 		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
481 		if (ctrlr->nsdata == NULL) {
482 			free(ctrlr->ns);
483 			free(ctrlr);
484 			return NULL;
485 		}
486 
487 		for (i = 0; i < num_ns; i++) {
488 			ctrlr->ns[i].id = i + 1;
489 			ctrlr->ns[i].ctrlr = ctrlr;
490 			ctrlr->ns[i].is_active = true;
491 			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
492 			ctrlr->nsdata[i].nsze = 1024;
493 			ctrlr->nsdata[i].nmic.can_share = multipath;
494 		}
495 
496 		ctrlr->cdata.nn = num_ns;
497 		ctrlr->cdata.mnan = num_ns;
498 		ctrlr->cdata.nanagrpid = num_ns;
499 	}
500 
501 	ctrlr->cdata.cntlid = ++g_ut_cntlid;
502 	ctrlr->cdata.cmic.multi_ctrlr = multipath;
503 	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
504 	ctrlr->trid = *trid;
505 	TAILQ_INIT(&ctrlr->active_io_qpairs);
506 
507 	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);
508 
509 	return ctrlr;
510 }
511 
512 static void
513 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
514 {
515 	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));
516 
517 	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
518 	free(ctrlr->nsdata);
519 	free(ctrlr->ns);
520 	free(ctrlr);
521 }
522 
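/* Most command mocks below funnel into this helper. It queues a ut_nvme_req
 * that defaults to successful completion; the request is completed only when a
 * test polls spdk_nvme_qpair_process_completions(). Tests may tweak the
 * prepared cpl first by looking the request up with
 * ut_get_outstanding_nvme_request().
 */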
523 static int
524 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
525 		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
526 {
527 	struct ut_nvme_req *req;
528 
529 	req = calloc(1, sizeof(*req));
530 	if (req == NULL) {
531 		return -ENOMEM;
532 	}
533 
534 	req->opc = opc;
535 	req->cb_fn = cb_fn;
536 	req->cb_arg = cb_arg;
537 
538 	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
539 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
540 
541 	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
542 	qpair->num_outstanding_reqs++;
543 
544 	return 0;
545 }
546 
547 static struct ut_nvme_req *
548 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
549 {
550 	struct ut_nvme_req *req;
551 
552 	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
553 		if (req->cb_arg == cb_arg) {
554 			break;
555 		}
556 	}
557 
558 	return req;
559 }
560 
561 static struct spdk_bdev_io *
562 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
563 		 struct spdk_io_channel *ch)
564 {
565 	struct spdk_bdev_io *bdev_io;
566 
567 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
568 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
569 	bdev_io->type = type;
570 	bdev_io->bdev = &nbdev->disk;
571 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
572 
573 	return bdev_io;
574 }
575 
576 static void
577 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
578 {
579 	bdev_io->u.bdev.iovs = &bdev_io->iov;
580 	bdev_io->u.bdev.iovcnt = 1;
581 
582 	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
583 	bdev_io->iov.iov_len = 4096;
584 }
585 
586 static void
587 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
588 {
589 	if (ctrlr->is_failed) {
590 		free(ctrlr);
591 		return;
592 	}
593 
594 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
595 	if (probe_ctx->cb_ctx) {
596 		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
597 	}
598 
599 	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);
600 
601 	if (probe_ctx->attach_cb) {
602 		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
603 	}
604 }
605 
606 int
607 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
608 {
609 	struct spdk_nvme_ctrlr *ctrlr, *tmp;
610 
611 	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
612 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
613 			continue;
614 		}
615 		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
616 		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
617 	}
618 
619 	free(probe_ctx);
620 
621 	return 0;
622 }
623 
624 struct spdk_nvme_probe_ctx *
625 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
626 			const struct spdk_nvme_ctrlr_opts *opts,
627 			spdk_nvme_attach_cb attach_cb)
628 {
629 	struct spdk_nvme_probe_ctx *probe_ctx;
630 
631 	if (trid == NULL) {
632 		return NULL;
633 	}
634 
635 	probe_ctx = calloc(1, sizeof(*probe_ctx));
636 	if (probe_ctx == NULL) {
637 		return NULL;
638 	}
639 
640 	probe_ctx->trid = *trid;
641 	probe_ctx->cb_ctx = (void *)opts;
642 	probe_ctx->attach_cb = attach_cb;
643 
644 	return probe_ctx;
645 }
646 
647 int
648 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
649 {
650 	if (ctrlr->attached) {
651 		ut_detach_ctrlr(ctrlr);
652 	}
653 
654 	return 0;
655 }
656 
657 int
658 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
659 {
660 	SPDK_CU_ASSERT_FATAL(ctx != NULL);
661 	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;
662 
663 	return 0;
664 }
665 
666 int
667 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
668 {
669 	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
670 }
671 
672 void
673 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
674 {
675 	memset(opts, 0, opts_size);
676 
677 	snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN);
678 }
679 
680 const struct spdk_nvme_ctrlr_data *
681 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
682 {
683 	return &ctrlr->cdata;
684 }
685 
686 uint32_t
687 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
688 {
689 	return ctrlr->num_ns;
690 }
691 
692 struct spdk_nvme_ns *
693 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
694 {
695 	if (nsid < 1 || nsid > ctrlr->num_ns) {
696 		return NULL;
697 	}
698 
699 	return &ctrlr->ns[nsid - 1];
700 }
701 
702 bool
703 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
704 {
705 	if (nsid < 1 || nsid > ctrlr->num_ns) {
706 		return false;
707 	}
708 
709 	return ctrlr->ns[nsid - 1].is_active;
710 }
711 
712 union spdk_nvme_csts_register
713 	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
714 {
715 	union spdk_nvme_csts_register csts;
716 
717 	csts.raw = 0;
718 
719 	return csts;
720 }
721 
722 union spdk_nvme_vs_register
723 	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
724 {
725 	union spdk_nvme_vs_register vs;
726 
727 	vs.raw = 0;
728 
729 	return vs;
730 }
731 
732 struct spdk_nvme_qpair *
733 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
734 			       const struct spdk_nvme_io_qpair_opts *user_opts,
735 			       size_t opts_size)
736 {
737 	struct spdk_nvme_qpair *qpair;
738 
739 	qpair = calloc(1, sizeof(*qpair));
740 	if (qpair == NULL) {
741 		return NULL;
742 	}
743 
744 	qpair->ctrlr = ctrlr;
745 	TAILQ_INIT(&qpair->outstanding_reqs);
746 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
747 
748 	return qpair;
749 }
750 
751 static void
752 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
753 {
754 	struct spdk_nvme_poll_group *group = qpair->poll_group;
755 
756 	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);
757 
758 	qpair->poll_group_tailq_head = &group->connected_qpairs;
759 	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
760 	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
761 }
762 
763 static void
764 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
765 {
766 	struct spdk_nvme_poll_group *group = qpair->poll_group;
767 
768 	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);
769 
770 	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
771 	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
772 	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
773 }
774 
775 int
776 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
777 				 struct spdk_nvme_qpair *qpair)
778 {
779 	if (qpair->is_connected) {
780 		return -EISCONN;
781 	}
782 
783 	qpair->is_connected = true;
784 	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
785 
786 	if (qpair->poll_group) {
787 		nvme_poll_group_connect_qpair(qpair);
788 	}
789 
790 	return 0;
791 }
792 
793 void
794 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
795 {
796 	if (!qpair->is_connected) {
797 		return;
798 	}
799 
800 	qpair->is_connected = false;
801 
802 	if (qpair->poll_group != NULL) {
803 		nvme_poll_group_disconnect_qpair(qpair);
804 	}
805 }
806 
807 int
808 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
809 {
810 	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);
811 
812 	if (qpair->in_completion_context) {
813 		qpair->delete_after_completion_context = true;
814 		return 0;
815 	}
816 
817 	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
818 
819 	if (qpair->poll_group != NULL) {
820 		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
821 	}
822 
823 	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);
824 
825 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
826 
827 	free(qpair);
828 
829 	return 0;
830 }
831 
832 int
833 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
834 {
835 	if (ctrlr->fail_reset) {
836 		ctrlr->is_failed = true;
837 		return -EIO;
838 	}
839 
840 	ctrlr->adminq.is_connected = true;
841 	return 0;
842 }
843 
844 void
845 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
846 {
847 }
848 
849 int
850 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
851 {
852 	if (ctrlr->is_removed) {
853 		return -ENXIO;
854 	}
855 
856 	ctrlr->adminq.is_connected = false;
857 	ctrlr->is_failed = false;
858 
859 	return 0;
860 }
861 
862 void
863 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
864 {
865 	ctrlr->is_failed = true;
866 }
867 
868 bool
869 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
870 {
871 	return ctrlr->is_failed;
872 }
873 
874 spdk_nvme_qp_failure_reason
875 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
876 {
877 	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
878 }
879 
880 #define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
881 				 sizeof(uint32_t))
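
/* Build a minimal ANA log page into buf: an spdk_nvme_ana_page header whose
 * num_ana_group_desc equals the namespace count, followed by one group
 * descriptor per active namespace. Each descriptor lists exactly one NSID,
 * hence UT_ANA_DESC_SIZE is the descriptor struct plus a single uint32_t.
 */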
882 static void
883 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
884 {
885 	struct spdk_nvme_ana_page ana_hdr;
886 	char _ana_desc[UT_ANA_DESC_SIZE];
887 	struct spdk_nvme_ana_group_descriptor *ana_desc;
888 	struct spdk_nvme_ns *ns;
889 	uint32_t i;
890 
891 	memset(&ana_hdr, 0, sizeof(ana_hdr));
892 	ana_hdr.num_ana_group_desc = ctrlr->num_ns;
893 
894 	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
895 	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));
896 
897 	buf += sizeof(ana_hdr);
898 	length -= sizeof(ana_hdr);
899 
900 	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;
901 
902 	for (i = 0; i < ctrlr->num_ns; i++) {
903 		ns = &ctrlr->ns[i];
904 
905 		if (!ns->is_active) {
906 			continue;
907 		}
908 
909 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
910 
911 		ana_desc->ana_group_id = ns->id;
912 		ana_desc->num_of_nsid = 1;
913 		ana_desc->ana_state = ns->ana_state;
914 		ana_desc->nsid[0] = ns->id;
915 
916 		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
917 		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);
918 
919 		buf += UT_ANA_DESC_SIZE;
920 		length -= UT_ANA_DESC_SIZE;
921 	}
922 }
923 
924 int
925 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
926 				 uint8_t log_page, uint32_t nsid,
927 				 void *payload, uint32_t payload_size,
928 				 uint64_t offset,
929 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
930 {
931 	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
932 		SPDK_CU_ASSERT_FATAL(offset == 0);
933 		ut_create_ana_log_page(ctrlr, payload, payload_size);
934 	}
935 
936 	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
937 				      cb_fn, cb_arg);
938 }
939 
940 int
941 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
942 			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
943 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
944 {
945 	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
946 }
947 
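/* Mock abort: find the outstanding request whose cb_arg matches cmd_cb_arg and
 * rewrite its prepared completion to ABORTED BY REQUEST, then queue the ABORT
 * command itself on the admin queue. Each completes on the next poll of its
 * respective queue.
 */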
948 int
949 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
950 			      void *cmd_cb_arg,
951 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
952 {
953 	struct ut_nvme_req *req = NULL, *abort_req;
954 
955 	if (qpair == NULL) {
956 		qpair = &ctrlr->adminq;
957 	}
958 
959 	abort_req = calloc(1, sizeof(*abort_req));
960 	if (abort_req == NULL) {
961 		return -ENOMEM;
962 	}
963 
964 	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
965 		if (req->cb_arg == cmd_cb_arg) {
966 			break;
967 		}
968 	}
969 
970 	if (req == NULL) {
971 		free(abort_req);
972 		return -ENOENT;
973 	}
974 
975 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
976 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
977 
978 	abort_req->opc = SPDK_NVME_OPC_ABORT;
979 	abort_req->cb_fn = cb_fn;
980 	abort_req->cb_arg = cb_arg;
981 
982 	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
983 	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
984 	abort_req->cpl.cdw0 = 0;
985 
986 	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
987 	ctrlr->adminq.num_outstanding_reqs++;
988 
989 	return 0;
990 }
991 
992 int32_t
993 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
994 {
995 	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
996 }
997 
998 uint32_t
999 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
1000 {
1001 	return ns->id;
1002 }
1003 
1004 struct spdk_nvme_ctrlr *
1005 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
1006 {
1007 	return ns->ctrlr;
1008 }
1009 
1010 static inline struct spdk_nvme_ns_data *
1011 _nvme_ns_get_data(struct spdk_nvme_ns *ns)
1012 {
1013 	return &ns->ctrlr->nsdata[ns->id - 1];
1014 }
1015 
1016 const struct spdk_nvme_ns_data *
1017 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
1018 {
1019 	return _nvme_ns_get_data(ns);
1020 }
1021 
1022 uint64_t
1023 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
1024 {
1025 	return _nvme_ns_get_data(ns)->nsze;
1026 }
1027 
1028 const struct spdk_uuid *
1029 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
1030 {
1031 	return ns->uuid;
1032 }
1033 
1034 enum spdk_nvme_csi
1035 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
1036 	return ns->csi;
1037 }
1038 
1039 int
1040 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
1041 			      void *metadata, uint64_t lba, uint32_t lba_count,
1042 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1043 			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
1044 {
1045 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
1046 }
1047 
1048 int
1049 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1050 			       void *buffer, void *metadata, uint64_t lba,
1051 			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1052 			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
1053 {
1054 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
1055 }
1056 
1057 int
1058 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1059 			       uint64_t lba, uint32_t lba_count,
1060 			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1061 			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1062 			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
1063 			       uint16_t apptag_mask, uint16_t apptag)
1064 {
1065 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
1066 }
1067 
1068 int
1069 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1070 				uint64_t lba, uint32_t lba_count,
1071 				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1072 				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1073 				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
1074 				uint16_t apptag_mask, uint16_t apptag)
1075 {
1076 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
1077 }
1078 
1079 static bool g_ut_readv_ext_called;
1080 int
1081 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1082 			   uint64_t lba, uint32_t lba_count,
1083 			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1084 			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1085 			   spdk_nvme_req_next_sge_cb next_sge_fn,
1086 			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
1087 {
1088 	g_ut_readv_ext_called = true;
1089 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
1090 }
1091 
1092 static bool g_ut_read_ext_called;
1093 int
1094 spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
1095 			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1096 			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
1097 {
1098 	g_ut_read_ext_called = true;
1099 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
1100 }
1101 
1102 static bool g_ut_writev_ext_called;
1103 int
1104 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1105 			    uint64_t lba, uint32_t lba_count,
1106 			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1107 			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1108 			    spdk_nvme_req_next_sge_cb next_sge_fn,
1109 			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
1110 {
1111 	g_ut_writev_ext_called = true;
1112 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
1113 }
1114 
1115 static bool g_ut_write_ext_called;
1116 int
1117 spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
1118 			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1119 			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
1120 {
1121 	g_ut_write_ext_called = true;
1122 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
1123 }
1124 
1125 int
1126 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1127 				  uint64_t lba, uint32_t lba_count,
1128 				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1129 				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1130 				  spdk_nvme_req_next_sge_cb next_sge_fn,
1131 				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
1132 {
1133 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
1134 }
1135 
1136 int
1137 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1138 				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
1139 				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1140 {
1141 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
1142 }
1143 
1144 int
1145 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1146 			      uint64_t lba, uint32_t lba_count,
1147 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1148 			      uint32_t io_flags)
1149 {
1150 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
1151 }
1152 
1153 int
1154 spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1155 		      const struct spdk_nvme_scc_source_range *ranges,
1156 		      uint16_t num_ranges, uint64_t dest_lba,
1157 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1158 {
1159 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
1160 }
1161 
1162 struct spdk_nvme_poll_group *
1163 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
1164 {
1165 	struct spdk_nvme_poll_group *group;
1166 
1167 	group = calloc(1, sizeof(*group));
1168 	if (group == NULL) {
1169 		return NULL;
1170 	}
1171 
1172 	group->ctx = ctx;
1173 	if (table != NULL) {
1174 		group->accel_fn_table = *table;
1175 	}
1176 	TAILQ_INIT(&group->connected_qpairs);
1177 	TAILQ_INIT(&group->disconnected_qpairs);
1178 
1179 	return group;
1180 }
1181 
1182 int
1183 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
1184 {
1185 	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
1186 	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
1187 		return -EBUSY;
1188 	}
1189 
1190 	free(group);
1191 
1192 	return 0;
1193 }
1194 
1195 spdk_nvme_qp_failure_reason
1196 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
1197 {
1198 	return qpair->failure_reason;
1199 }
1200 
1201 bool
1202 spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
1203 {
1204 	return qpair->is_connected;
1205 }
1206 
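/* Mock completion path: drain every outstanding request on the qpair in a
 * single call, invoking each callback with its prepared cpl. A deletion
 * requested from within a callback context is deferred and honored here via
 * delete_after_completion_context.
 */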
1207 int32_t
1208 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1209 				    uint32_t max_completions)
1210 {
1211 	struct ut_nvme_req *req, *tmp;
1212 	uint32_t num_completions = 0;
1213 
1214 	if (!qpair->is_connected) {
1215 		return -ENXIO;
1216 	}
1217 
1218 	qpair->in_completion_context = true;
1219 
1220 	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
1221 		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
1222 		qpair->num_outstanding_reqs--;
1223 
1224 		req->cb_fn(req->cb_arg, &req->cpl);
1225 
1226 		free(req);
1227 		num_completions++;
1228 	}
1229 
1230 	qpair->in_completion_context = false;
1231 	if (qpair->delete_after_completion_context) {
1232 		spdk_nvme_ctrlr_free_io_qpair(qpair);
1233 	}
1234 
1235 	return num_completions;
1236 }
1237 
1238 int64_t
1239 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
1240 		uint32_t completions_per_qpair,
1241 		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
1242 {
1243 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
1244 	int64_t local_completions = 0, error_reason = 0, num_completions = 0;
1245 
1246 	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);
1247 
1248 	if (disconnected_qpair_cb == NULL) {
1249 		return -EINVAL;
1250 	}
1251 
1252 	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
1253 		disconnected_qpair_cb(qpair, group->ctx);
1254 	}
1255 
1256 	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
1257 		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
1258 			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
1259 			/* Bump the number of completions so this counts as "busy" */
1260 			num_completions++;
1261 			continue;
1262 		}
1263 
1264 		local_completions = spdk_nvme_qpair_process_completions(qpair,
1265 				    completions_per_qpair);
1266 		if (local_completions < 0 && error_reason == 0) {
1267 			error_reason = local_completions;
1268 		} else {
1269 			num_completions += local_completions;
1270 			assert(num_completions >= 0);
1271 		}
1272 	}
1273 
1274 	return error_reason ? error_reason : num_completions;
1275 }
1276 
1277 int
1278 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
1279 			 struct spdk_nvme_qpair *qpair)
1280 {
1281 	CU_ASSERT(!qpair->is_connected);
1282 
1283 	qpair->poll_group = group;
1284 	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
1285 	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
1286 
1287 	return 0;
1288 }
1289 
1290 int
1291 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
1292 			    struct spdk_nvme_qpair *qpair)
1293 {
1294 	CU_ASSERT(!qpair->is_connected);
1295 
1296 	if (qpair->poll_group == NULL) {
1297 		return -ENOENT;
1298 	}
1299 
1300 	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);
1301 
1302 	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
1303 
1304 	qpair->poll_group = NULL;
1305 	qpair->poll_group_tailq_head = NULL;
1306 
1307 	return 0;
1308 }
1309 
1310 int
1311 spdk_bdev_register(struct spdk_bdev *bdev)
1312 {
1313 	g_ut_registered_bdev = bdev;
1314 
1315 	return g_ut_register_bdev_status;
1316 }
1317 
1318 void
1319 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
1320 {
1321 	int rc;
1322 
1323 	rc = bdev->fn_table->destruct(bdev->ctxt);
1324 
1325 	if (bdev == g_ut_registered_bdev) {
1326 		g_ut_registered_bdev = NULL;
1327 	}
1328 
1329 	if (rc <= 0 && cb_fn != NULL) {
1330 		cb_fn(cb_arg, rc);
1331 	}
1332 }
1333 
1334 int
1335 spdk_bdev_open_ext(const char *bdev_name, bool write,
1336 		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
1337 		   struct spdk_bdev_desc **desc)
1338 {
1339 	if (g_ut_registered_bdev == NULL ||
1340 	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
1341 		return -ENODEV;
1342 	}
1343 
1344 	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;
1345 
1346 	return 0;
1347 }
1348 
1349 struct spdk_bdev *
1350 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
1351 {
1352 	return (struct spdk_bdev *)desc;
1353 }
1354 
1355 int
1356 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
1357 {
1358 	bdev->blockcnt = size;
1359 
1360 	return 0;
1361 }
1362 
1363 struct spdk_io_channel *
1364 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
1365 {
1366 	return (struct spdk_io_channel *)bdev_io->internal.ch;
1367 }
1368 
1369 struct spdk_thread *
1370 spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
1371 {
1372 	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
1373 }
1374 
1375 void
1376 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
1377 {
1378 	bdev_io->internal.status = status;
1379 	bdev_io->internal.f.in_submit_request = false;
1380 }
1381 
1382 void
1383 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
1384 {
1385 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
1386 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1387 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
1388 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
1389 	} else {
1390 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
1391 	}
1392 
1393 	bdev_io->internal.error.nvme.cdw0 = cdw0;
1394 	bdev_io->internal.error.nvme.sct = sct;
1395 	bdev_io->internal.error.nvme.sc = sc;
1396 
1397 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
1398 }
1399 
1400 void
1401 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1402 {
1403 	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
1404 
1405 	ut_bdev_io_set_buf(bdev_io);
1406 
1407 	cb(ch, bdev_io, true);
1408 }
1409 
1410 static void
1411 test_create_ctrlr(void)
1412 {
1413 	struct spdk_nvme_transport_id trid = {};
1414 	struct spdk_nvme_ctrlr ctrlr = {};
1415 	int rc;
1416 
1417 	ut_init_trid(&trid);
1418 
1419 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1420 	CU_ASSERT(rc == 0);
1421 
1422 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
1423 
1424 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1425 	CU_ASSERT(rc == 0);
1426 
1427 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
1428 
1429 	poll_threads();
1430 	spdk_delay_us(1000);
1431 	poll_threads();
1432 
1433 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1434 }
1435 
1436 static void
1437 ut_check_hotplug_on_reset(void *cb_arg, int rc)
1438 {
1439 	bool *detect_remove = cb_arg;
1440 
1441 	CU_ASSERT(rc != 0);
1442 	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);
1443 
1444 	*detect_remove = true;
1445 }
1446 
1447 static void
1448 test_reset_ctrlr(void)
1449 {
1450 	struct spdk_nvme_transport_id trid = {};
1451 	struct spdk_nvme_ctrlr ctrlr = {};
1452 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1453 	struct nvme_path_id *curr_trid;
1454 	struct spdk_io_channel *ch1, *ch2;
1455 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1456 	bool detect_remove;
1457 	int rc;
1458 
1459 	ut_init_trid(&trid);
1460 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1461 
1462 	set_thread(0);
1463 
1464 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1465 	CU_ASSERT(rc == 0);
1466 
1467 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1468 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1469 
1470 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1471 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1472 
1473 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1474 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1475 
1476 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
1477 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
1478 
1479 	set_thread(1);
1480 
1481 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1482 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1483 
1484 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
1485 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
1486 
1487 	/* Reset starts from thread 1. */
1488 	set_thread(1);
1489 
1490 	/* Case 1: ctrlr is already being destructed. */
1491 	nvme_ctrlr->destruct = true;
1492 
1493 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1494 	CU_ASSERT(rc == -ENXIO);
1495 
1496 	/* Case 2: reset is in progress. */
1497 	nvme_ctrlr->destruct = false;
1498 	nvme_ctrlr->resetting = true;
1499 
1500 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1501 	CU_ASSERT(rc == -EBUSY);
1502 
1503 	/* Case 3: reset completes successfully. */
1504 	nvme_ctrlr->resetting = false;
1505 	curr_trid->last_failed_tsc = spdk_get_ticks();
1506 	ctrlr.is_failed = true;
1507 
1508 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1509 	CU_ASSERT(rc == 0);
1510 	CU_ASSERT(nvme_ctrlr->resetting == true);
1511 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
1512 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
1513 
1514 	poll_thread_times(0, 3);
1515 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
1516 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
1517 
1518 	poll_thread_times(0, 1);
1519 	poll_thread_times(1, 1);
1520 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
1521 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
1522 	CU_ASSERT(ctrlr.is_failed == true);
1523 
1524 	poll_thread_times(1, 1);
1525 	poll_thread_times(0, 1);
1526 	CU_ASSERT(ctrlr.is_failed == false);
1527 	CU_ASSERT(ctrlr.adminq.is_connected == false);
1528 
1529 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1530 	poll_thread_times(0, 2);
1531 	CU_ASSERT(ctrlr.adminq.is_connected == true);
1532 
1533 	poll_thread_times(0, 1);
1534 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
1535 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
1536 
1537 	poll_thread_times(1, 1);
1538 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
1539 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
1540 	CU_ASSERT(nvme_ctrlr->resetting == true);
1541 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
1542 
1543 	poll_thread_times(0, 2);
1544 	CU_ASSERT(nvme_ctrlr->resetting == true);
1545 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
1546 	poll_thread_times(1, 1);
1547 	CU_ASSERT(nvme_ctrlr->resetting == true);
1548 	poll_thread_times(0, 1);
1549 	CU_ASSERT(nvme_ctrlr->resetting == false);
1550 
1551 	/* Case 4: ctrlr is already removed. */
1552 	ctrlr.is_removed = true;
1553 
1554 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1555 	CU_ASSERT(rc == 0);
1556 
1557 	detect_remove = false;
1558 	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
1559 	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;
1560 
1561 	poll_threads();
1562 
1563 	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
1564 	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
1565 	CU_ASSERT(detect_remove == true);
1566 
1567 	ctrlr.is_removed = false;
1568 
1569 	spdk_put_io_channel(ch2);
1570 
1571 	set_thread(0);
1572 
1573 	spdk_put_io_channel(ch1);
1574 
1575 	poll_threads();
1576 
1577 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1578 	CU_ASSERT(rc == 0);
1579 
1580 	poll_threads();
1581 	spdk_delay_us(1000);
1582 	poll_threads();
1583 
1584 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1585 }
1586 
1587 static void
1588 test_race_between_reset_and_destruct_ctrlr(void)
1589 {
1590 	struct spdk_nvme_transport_id trid = {};
1591 	struct spdk_nvme_ctrlr ctrlr = {};
1592 	struct nvme_ctrlr *nvme_ctrlr;
1593 	struct spdk_io_channel *ch1, *ch2;
1594 	int rc;
1595 
1596 	ut_init_trid(&trid);
1597 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1598 
1599 	set_thread(0);
1600 
1601 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1602 	CU_ASSERT(rc == 0);
1603 
1604 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1605 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1606 
1607 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1608 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1609 
1610 	set_thread(1);
1611 
1612 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1613 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1614 
1615 	/* Reset starts from thread 1. */
1616 	set_thread(1);
1617 
1618 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1619 	CU_ASSERT(rc == 0);
1620 	CU_ASSERT(nvme_ctrlr->resetting == true);
1621 
1622 	/* Try destructing the ctrlr while it is being reset; the destruct will be deferred. */
1623 	set_thread(0);
1624 
1625 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1626 	CU_ASSERT(rc == 0);
1627 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1628 	CU_ASSERT(nvme_ctrlr->destruct == true);
1629 	CU_ASSERT(nvme_ctrlr->resetting == true);
1630 
1631 	poll_threads();
1632 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1633 	poll_threads();
1634 
1635 	/* Reset completed, but the ctrlr is not destructed yet. */
1636 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1637 	CU_ASSERT(nvme_ctrlr->destruct == true);
1638 	CU_ASSERT(nvme_ctrlr->resetting == false);
1639 
1640 	/* New reset request is rejected. */
1641 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1642 	CU_ASSERT(rc == -ENXIO);
1643 
1644 	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
1645 	 * However, there are still two channels open, so the destruct is not completed yet.
1646 	 */
1647 	poll_threads();
1648 
1649 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1650 
1651 	set_thread(0);
1652 
1653 	spdk_put_io_channel(ch1);
1654 
1655 	set_thread(1);
1656 
1657 	spdk_put_io_channel(ch2);
1658 
1659 	poll_threads();
1660 	spdk_delay_us(1000);
1661 	poll_threads();
1662 
1663 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1664 }
1665 
1666 static void
1667 test_failover_ctrlr(void)
1668 {
1669 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
1670 	struct spdk_nvme_ctrlr ctrlr = {};
1671 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1672 	struct nvme_path_id *curr_trid, *next_trid;
1673 	struct spdk_io_channel *ch1, *ch2;
1674 	int rc;
1675 
1676 	ut_init_trid(&trid1);
1677 	ut_init_trid2(&trid2);
1678 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1679 
1680 	set_thread(0);
1681 
1682 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1683 	CU_ASSERT(rc == 0);
1684 
1685 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1686 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1687 
1688 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1689 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1690 
1691 	set_thread(1);
1692 
1693 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1694 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1695 
1696 	/* First, test the single-trid case. */
1697 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1698 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1699 
1700 	/* Failover starts from thread 1. */
1701 	set_thread(1);
1702 
1703 	/* Case 1: ctrlr is already being destructed. */
1704 	nvme_ctrlr->destruct = true;
1705 
1706 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
1707 	CU_ASSERT(rc == -ENXIO);
1708 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
1709 
1710 	/* Case 2: reset is in progress. */
1711 	nvme_ctrlr->destruct = false;
1712 	nvme_ctrlr->resetting = true;
1713 
1714 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
1715 	CU_ASSERT(rc == -EINPROGRESS);
1716 
1717 	/* Case 3: reset completes successfully. */
1718 	nvme_ctrlr->resetting = false;
1719 
1720 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
1721 	CU_ASSERT(rc == 0);
1722 
1723 	CU_ASSERT(nvme_ctrlr->resetting == true);
1724 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
1725 
1726 	poll_threads();
1727 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1728 	poll_threads();
1729 
1730 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1731 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1732 
1733 	CU_ASSERT(nvme_ctrlr->resetting == false);
1734 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
1735 
1736 	set_thread(0);
1737 
1738 	/* Second, test the two-trid case. */
1739 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1740 	CU_ASSERT(rc == 0);
1741 
1742 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1743 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1744 	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
1745 	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);
1746 
1747 	/* Failover starts from thread 1. */
1748 	set_thread(1);
1749 
1750 	/* Case 4: reset is in progress. */
1751 	nvme_ctrlr->resetting = true;
1752 
1753 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
1754 	CU_ASSERT(rc == -EINPROGRESS);
1755 
1756 	/* Case 5: failover completes successfully. */
1757 	nvme_ctrlr->resetting = false;
1758 
1759 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
1760 	CU_ASSERT(rc == 0);
1761 
1762 	CU_ASSERT(nvme_ctrlr->resetting == true);
1763 
1764 	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1765 	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
1766 	CU_ASSERT(next_trid != curr_trid);
1767 	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
1768 	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);
1769 
1770 	poll_threads();
1771 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1772 	poll_threads();
1773 
1774 	CU_ASSERT(nvme_ctrlr->resetting == false);
1775 
1776 	spdk_put_io_channel(ch2);
1777 
1778 	set_thread(0);
1779 
1780 	spdk_put_io_channel(ch1);
1781 
1782 	poll_threads();
1783 
1784 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1785 	CU_ASSERT(rc == 0);
1786 
1787 	poll_threads();
1788 	spdk_delay_us(1000);
1789 	poll_threads();
1790 
1791 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1792 }
1793 
1794 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
1795  *
1796  * An nvme_ctrlr initially had trid1 and trid2, with trid1 active. The connection
1797  * to trid1 was lost, and resetting the ctrlr failed repeatedly before failover
1798  * from trid1 to trid2 started. While the failed reset was being processed, trid3
1799  * was added. trid1 should have remained active, i.e., at the head of the list,
1800  * until the failover completed. However, trid3 was inserted at the head of the
1801  * list by mistake.
1802  *
1803  * I/O qpairs have a shorter polling period than the admin qpair, so when a
1804  * connection is broken, an I/O qpair may detect the error before the admin qpair
1805  * does. An I/O qpair error triggers a ctrlr reset, while an admin qpair error
1806  * triggers a failover. Hence the reset may be executed repeatedly before the
1807  * failover is executed, making this race real. The following test verifies the fix.
1808  */
1809 static void
1810 test_race_between_failover_and_add_secondary_trid(void)
1811 {
1812 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1813 	struct spdk_nvme_ctrlr ctrlr = {};
1814 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1815 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1816 	struct spdk_io_channel *ch1, *ch2;
1817 	int rc;
1818 
1819 	ut_init_trid(&trid1);
1820 	ut_init_trid2(&trid2);
1821 	ut_init_trid3(&trid3);
1822 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1823 
1824 	set_thread(0);
1825 
1826 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1827 	CU_ASSERT(rc == 0);
1828 
1829 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1830 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1831 
1832 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1833 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1834 
1835 	set_thread(1);
1836 
1837 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1838 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1839 
1840 	set_thread(0);
1841 
1842 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1843 	CU_ASSERT(rc == 0);
1844 
1845 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1846 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1847 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1848 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1849 	path_id2 = TAILQ_NEXT(path_id1, link);
1850 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1851 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1852 
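	/* Make the reset fail so that path_id1 records a failure but remains the
	 * active path.
	 */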
1853 	ctrlr.fail_reset = true;
1854 
1855 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1856 	CU_ASSERT(rc == 0);
1857 
1858 	poll_threads();
1859 
1860 	CU_ASSERT(path_id1->last_failed_tsc != 0);
1861 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1862 
1863 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1864 	CU_ASSERT(rc == 0);
1865 
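	/* Add trid3 while the failed reset is still being processed. trid3 must be
	 * appended to the tail of the list, not inserted at the head.
	 */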
1866 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1867 	CU_ASSERT(rc == 0);
1868 
1869 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1870 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1871 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1872 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1873 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1874 	path_id3 = TAILQ_NEXT(path_id2, link);
1875 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1876 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1877 
1878 	poll_threads();
1879 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1880 	poll_threads();
1881 
1882 	spdk_put_io_channel(ch1);
1883 
1884 	set_thread(1);
1885 
1886 	spdk_put_io_channel(ch2);
1887 
1888 	poll_threads();
1889 
1890 	set_thread(0);
1891 
1892 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1893 	CU_ASSERT(rc == 0);
1894 
1895 	poll_threads();
1896 	spdk_delay_us(1000);
1897 	poll_threads();
1898 
1899 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1900 }
1901 
1902 static void
1903 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1904 {
1905 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1906 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1907 }
1908 
1909 static void
1910 test_pending_reset(void)
1911 {
1912 	struct spdk_nvme_transport_id trid = {};
1913 	struct spdk_nvme_ctrlr *ctrlr;
1914 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
1915 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1916 	const int STRING_SIZE = 32;
1917 	const char *attached_names[STRING_SIZE];
1918 	struct nvme_bdev *bdev;
1919 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1920 	struct spdk_io_channel *ch1, *ch2;
1921 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1922 	struct nvme_io_path *io_path1, *io_path2;
1923 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1924 	int rc;
1925 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
1926 
1927 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
1928 	bdev_opts.multipath = false;
1929 
1930 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1931 	ut_init_trid(&trid);
1932 
1933 	set_thread(0);
1934 
1935 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1936 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1937 
1938 	g_ut_attach_ctrlr_status = 0;
1939 	g_ut_attach_bdev_count = 1;
1940 
1941 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1942 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
1943 	CU_ASSERT(rc == 0);
1944 
1945 	spdk_delay_us(1000);
1946 	poll_threads();
1947 
1948 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1949 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1950 
1951 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1952 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1953 
1954 	ch1 = spdk_get_io_channel(bdev);
1955 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1956 
1957 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1958 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1959 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1960 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1961 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1962 
1963 	set_thread(1);
1964 
1965 	ch2 = spdk_get_io_channel(bdev);
1966 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1967 
1968 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1969 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1970 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1971 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1972 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1973 
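	/* Preset both resets to FAILED so that a later SUCCESS status proves the
	 * completion path actually ran.
	 */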
1974 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1975 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1976 
1977 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1978 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1979 
1980 	/* The first reset request is submitted on thread 1, and the second reset request
1981 	 * is submitted on thread 0 while the first request is being processed.
1982 	 */
1983 	bdev_nvme_submit_request(ch2, first_bdev_io);
1984 
1985 	poll_thread_times(0, 1);
1986 	poll_thread_times(1, 2);
1987 
1988 	CU_ASSERT(nvme_ctrlr->resetting == true);
1989 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1990 
1991 	set_thread(0);
1992 
1993 	bdev_nvme_submit_request(ch1, second_bdev_io);
1994 
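	/* Step the pollers just far enough that the second reset is queued as a
	 * pending reset instead of being executed, because the first reset is still
	 * in progress.
	 */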
1995 	poll_thread_times(0, 1);
1996 	poll_thread_times(1, 1);
1997 	poll_thread_times(0, 2);
1998 	poll_thread_times(1, 1);
1999 	poll_thread_times(0, 1);
2000 
2001 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io);
2002 
2003 	poll_threads();
2004 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2005 	poll_threads();
2006 
2007 	CU_ASSERT(nvme_ctrlr->resetting == false);
2008 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2009 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2010 
2011 	/* The first reset request is submitted on thread 1, and the second reset request
2012 	 * is submitted on thread 0 while the first request is being processed.
2013 	 *
2014 	 * The difference from the above scenario is that the controller reset fails while
2015 	 * the first request is being processed. Hence both reset requests should fail.
2016 	 */
2017 	set_thread(1);
2018 
2019 	bdev_nvme_submit_request(ch2, first_bdev_io);
2020 
2021 	poll_thread_times(0, 1);
2022 	poll_thread_times(1, 2);
2023 
2024 	CU_ASSERT(nvme_ctrlr->resetting == true);
2025 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
2026 
2027 	set_thread(0);
2028 
2029 	bdev_nvme_submit_request(ch1, second_bdev_io);
2030 
2031 	poll_thread_times(0, 1);
2032 	poll_thread_times(1, 1);
2033 	poll_thread_times(0, 2);
2034 	poll_thread_times(1, 1);
2035 	poll_thread_times(0, 1);
2036 
2037 	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io);
2038 
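	/* Fail the ongoing reset. The queued pending reset must fail as well. */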
2039 	ctrlr->fail_reset = true;
2040 
2041 	poll_threads();
2042 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2043 	poll_threads();
2044 
2045 	CU_ASSERT(nvme_ctrlr->resetting == false);
2046 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2047 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2048 
2049 	spdk_put_io_channel(ch1);
2050 
2051 	set_thread(1);
2052 
2053 	spdk_put_io_channel(ch2);
2054 
2055 	poll_threads();
2056 
2057 	set_thread(0);
2058 
2059 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2060 	CU_ASSERT(rc == 0);
2061 
2062 	poll_threads();
2063 	spdk_delay_us(1000);
2064 	poll_threads();
2065 
2066 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2067 
2068 	free(first_bdev_io);
2069 	free(second_bdev_io);
2070 }
2071 
2072 static void
2073 test_attach_ctrlr(void)
2074 {
2075 	struct spdk_nvme_transport_id trid = {};
2076 	struct spdk_nvme_ctrlr *ctrlr;
2077 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2078 	struct nvme_ctrlr *nvme_ctrlr;
2079 	const int STRING_SIZE = 32;
2080 	const char *attached_names[STRING_SIZE];
2081 	struct nvme_bdev *nbdev;
2082 	int rc;
2083 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2084 
2085 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2086 	bdev_opts.multipath = false;
2087 
2088 	set_thread(0);
2089 
2090 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2091 	ut_init_trid(&trid);
2092 
2093 	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is
2094 	 * removed by probe polling.
2095 	 */
2096 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2097 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2098 
2099 	ctrlr->is_failed = true;
2100 	g_ut_attach_ctrlr_status = -EIO;
2101 	g_ut_attach_bdev_count = 0;
2102 
2103 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2104 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2105 	CU_ASSERT(rc == 0);
2106 
2107 	spdk_delay_us(1000);
2108 	poll_threads();
2109 
2110 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2111 
2112 	/* If the ctrlr has no namespaces, one nvme_ctrlr without namespaces is created. */
2113 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2114 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2115 
2116 	g_ut_attach_ctrlr_status = 0;
2117 
2118 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2119 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2120 	CU_ASSERT(rc == 0);
2121 
2122 	spdk_delay_us(1000);
2123 	poll_threads();
2124 
2125 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2126 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2127 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2128 
2129 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2130 	CU_ASSERT(rc == 0);
2131 
2132 	poll_threads();
2133 	spdk_delay_us(1000);
2134 	poll_threads();
2135 
2136 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2137 
2138 	/* If the ctrlr has one namespace, one nvme_ctrlr with one namespace and
2139 	 * one nvme_bdev are created.
2140 	 */
2141 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2142 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2143 
2144 	g_ut_attach_bdev_count = 1;
2145 
2146 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2147 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2148 	CU_ASSERT(rc == 0);
2149 
2150 	spdk_delay_us(1000);
2151 	poll_threads();
2152 
2153 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2154 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2155 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2156 
2157 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2158 	attached_names[0] = NULL;
2159 
2160 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2161 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2162 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2163 
2164 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2165 	CU_ASSERT(rc == 0);
2166 
2167 	poll_threads();
2168 	spdk_delay_us(1000);
2169 	poll_threads();
2170 
2171 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2172 
2173 	/* The ctrlr has one namespace, but an nvme_ctrlr without namespaces is
2174 	 * created because creating the nvme_bdev failed.
2175 	 */
2176 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2177 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2178 
2179 	g_ut_register_bdev_status = -EINVAL;
2180 	g_ut_attach_bdev_count = 0;
2181 
2182 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2183 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2184 	CU_ASSERT(rc == 0);
2185 
2186 	spdk_delay_us(1000);
2187 	poll_threads();
2188 
2189 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2190 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2191 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2192 
2193 	CU_ASSERT(attached_names[0] == NULL);
2194 
2195 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2196 	CU_ASSERT(rc == 0);
2197 
2198 	poll_threads();
2199 	spdk_delay_us(1000);
2200 	poll_threads();
2201 
2202 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2203 
2204 	g_ut_register_bdev_status = 0;
2205 }
2206 
2207 static void
2208 test_aer_cb(void)
2209 {
2210 	struct spdk_nvme_transport_id trid = {};
2211 	struct spdk_nvme_ctrlr *ctrlr;
2212 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2213 	struct nvme_ctrlr *nvme_ctrlr;
2214 	struct nvme_bdev *bdev;
2215 	const int STRING_SIZE = 32;
2216 	const char *attached_names[STRING_SIZE];
2217 	union spdk_nvme_async_event_completion event = {};
2218 	struct spdk_nvme_cpl cpl = {};
2219 	int rc;
2220 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2221 
2222 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2223 	bdev_opts.multipath = false;
2224 
2225 	set_thread(0);
2226 
2227 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2228 	ut_init_trid(&trid);
2229 
2230 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd,
2231 	 * and 4th namespaces are populated.
2232 	 */
2233 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2234 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2235 
2236 	ctrlr->ns[0].is_active = false;
2237 
2238 	g_ut_attach_ctrlr_status = 0;
2239 	g_ut_attach_bdev_count = 3;
2240 
2241 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2242 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2243 	CU_ASSERT(rc == 0);
2244 
2245 	spdk_delay_us(1000);
2246 	poll_threads();
2247 
2248 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2249 	poll_threads();
2250 
2251 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2252 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2253 
2254 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2255 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2256 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2257 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2258 
2259 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2260 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2261 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2262 
2263 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace,
2264 	 * and change the size of the 4th namespace.
2265 	 */
2266 	ctrlr->ns[0].is_active = true;
2267 	ctrlr->ns[2].is_active = false;
2268 	ctrlr->nsdata[3].nsze = 2048;
2269 
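	/* Build a Namespace Attribute Changed AEN completion and deliver it to the
	 * AER callback directly.
	 */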
2270 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2271 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2272 	cpl.cdw0 = event.raw;
2273 
2274 	aer_cb(nvme_ctrlr, &cpl);
2275 
2276 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2277 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2278 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2279 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2280 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2281 
2282 	/* Change ANA state of active namespaces. */
2283 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2284 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2285 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2286 
2287 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2288 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2289 	cpl.cdw0 = event.raw;
2290 
2291 	aer_cb(nvme_ctrlr, &cpl);
2292 
2293 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2294 	poll_threads();
2295 
2296 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2297 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2298 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2299 
2300 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2301 	CU_ASSERT(rc == 0);
2302 
2303 	poll_threads();
2304 	spdk_delay_us(1000);
2305 	poll_threads();
2306 
2307 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2308 }
2309 
2310 static void
2311 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2312 			enum spdk_bdev_io_type io_type)
2313 {
2314 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2315 	struct nvme_io_path *io_path;
2316 	struct spdk_nvme_qpair *qpair;
2317 
2318 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2319 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2320 	qpair = io_path->qpair->qpair;
2321 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2322 
2323 	bdev_io->type = io_type;
2324 	bdev_io->internal.f.in_submit_request = true;
2325 
2326 	bdev_nvme_submit_request(ch, bdev_io);
2327 
2328 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2329 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2330 
2331 	poll_threads();
2332 
2333 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2334 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2335 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2336 }
2337 
2338 static void
2339 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2340 		   enum spdk_bdev_io_type io_type)
2341 {
2342 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2343 	struct nvme_io_path *io_path;
2344 	struct spdk_nvme_qpair *qpair;
2345 
2346 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2347 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2348 	qpair = io_path->qpair->qpair;
2349 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2350 
2351 	bdev_io->type = io_type;
2352 	bdev_io->internal.f.in_submit_request = true;
2353 
2354 	bdev_nvme_submit_request(ch, bdev_io);
2355 
2356 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2357 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2358 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2359 }
2360 
2361 static void
2362 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2363 {
2364 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2365 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2366 	struct ut_nvme_req *req;
2367 	struct nvme_io_path *io_path;
2368 	struct spdk_nvme_qpair *qpair;
2369 
2370 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2371 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2372 	qpair = io_path->qpair->qpair;
2373 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2374 
2375 	/* Only compare-and-write is tested for now. */
2376 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2377 	bdev_io->internal.f.in_submit_request = true;
2378 
2379 	bdev_nvme_submit_request(ch, bdev_io);
2380 
2381 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2382 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2383 	CU_ASSERT(bio->first_fused_submitted == true);
2384 
2385 	/* The first outstanding request is the compare operation. */
2386 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2387 	SPDK_CU_ASSERT_FATAL(req != NULL);
2388 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2389 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2390 
2391 	poll_threads();
2392 
2393 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2394 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2395 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2396 }
2397 
2398 static void
2399 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2400 			 struct spdk_nvme_ctrlr *ctrlr)
2401 {
2402 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2403 	bdev_io->internal.f.in_submit_request = true;
2404 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2405 
2406 	bdev_nvme_submit_request(ch, bdev_io);
2407 
2408 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2409 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2410 
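	/* The admin command completes on the ctrlr's thread (thread 1) first, and the
	 * completion is then delivered to the submitting thread (thread 0).
	 */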
2411 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2412 	poll_thread_times(1, 1);
2413 
2414 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
2415 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2416 
2417 	poll_thread_times(0, 1);
2418 
2419 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
2420 }
2421 
2422 static void
2423 test_submit_nvme_cmd(void)
2424 {
2425 	struct spdk_nvme_transport_id trid = {};
2426 	struct spdk_nvme_ctrlr *ctrlr;
2427 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2428 	struct nvme_ctrlr *nvme_ctrlr;
2429 	const int STRING_SIZE = 32;
2430 	const char *attached_names[STRING_SIZE];
2431 	struct nvme_bdev *bdev;
2432 	struct spdk_bdev_io *bdev_io;
2433 	struct spdk_io_channel *ch;
2434 	int rc;
2435 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2436 
2437 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2438 	bdev_opts.multipath = false;
2439 
2440 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2441 	ut_init_trid(&trid);
2442 
2443 	set_thread(1);
2444 
2445 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2446 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2447 
2448 	g_ut_attach_ctrlr_status = 0;
2449 	g_ut_attach_bdev_count = 1;
2450 
2451 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2452 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2453 	CU_ASSERT(rc == 0);
2454 
2455 	spdk_delay_us(1000);
2456 	poll_threads();
2457 
2458 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2459 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2460 
2461 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2462 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2463 
2464 	set_thread(0);
2465 
2466 	ch = spdk_get_io_channel(bdev);
2467 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2468 
2469 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2470 
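	/* A read submitted without iovecs takes the spdk_bdev_io_get_buf() path. */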
2471 	bdev_io->u.bdev.iovs = NULL;
2472 
2473 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2474 
2475 	ut_bdev_io_set_buf(bdev_io);
2476 
2477 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2478 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2479 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2480 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2481 
2482 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2483 
2484 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2485 
2486 	/* Verify that the ext NVME API is called when data is described by a memory domain. */
2487 	g_ut_read_ext_called = false;
2488 	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
2489 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2490 	CU_ASSERT(g_ut_read_ext_called == true);
2491 	g_ut_read_ext_called = false;
2492 	bdev_io->u.bdev.memory_domain = NULL;
2493 
2494 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2495 
2496 	free(bdev_io);
2497 
2498 	spdk_put_io_channel(ch);
2499 
2500 	poll_threads();
2501 
2502 	set_thread(1);
2503 
2504 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2505 	CU_ASSERT(rc == 0);
2506 
2507 	poll_threads();
2508 	spdk_delay_us(1000);
2509 	poll_threads();
2510 
2511 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2512 }
2513 
2514 static void
2515 test_add_remove_trid(void)
2516 {
2517 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2518 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2519 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
2520 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2521 	const int STRING_SIZE = 32;
2522 	const char *attached_names[STRING_SIZE];
2523 	struct nvme_path_id *ctrid;
2524 	int rc;
2525 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
2526 
2527 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
2528 	bdev_opts.multipath = false;
2529 
2530 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2531 	ut_init_trid(&path1.trid);
2532 	ut_init_trid2(&path2.trid);
2533 	ut_init_trid3(&path3.trid);
2534 
2535 	set_thread(0);
2536 
2537 	g_ut_attach_ctrlr_status = 0;
2538 	g_ut_attach_bdev_count = 0;
2539 
2540 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2541 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2542 
2543 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2544 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2545 	CU_ASSERT(rc == 0);
2546 
2547 	spdk_delay_us(1000);
2548 	poll_threads();
2549 
2550 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2551 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2552 
2553 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2554 
2555 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2556 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2557 
2558 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2559 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2560 	CU_ASSERT(rc == 0);
2561 
2562 	spdk_delay_us(1000);
2563 	poll_threads();
2564 
2565 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2566 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2567 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2568 			break;
2569 		}
2570 	}
2571 	CU_ASSERT(ctrid != NULL);
2572 
2573 	/* trid3 is not in the registered list. */
2574 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2575 	CU_ASSERT(rc == -ENXIO);
2576 
2577 	/* trid2 is not in use, and is simply removed. */
2578 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2579 	CU_ASSERT(rc == 0);
2580 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2581 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2582 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2583 	}
2584 
2585 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2586 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2587 
2588 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2589 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2590 	CU_ASSERT(rc == 0);
2591 
2592 	spdk_delay_us(1000);
2593 	poll_threads();
2594 
2595 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2596 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2597 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2598 			break;
2599 		}
2600 	}
2601 	CU_ASSERT(ctrid != NULL);
2602 
2603 	/* Mark path3 as failed by forcibly setting its last_failed_tsc to a non-zero value.
2604 	 * If we add path2 again, path2 should be inserted between path1 and path3.
2605 	 * Then, we remove path2. It is not in use, and is simply removed.
2606 	 */
2607 	ctrid->last_failed_tsc = spdk_get_ticks() + 1;
2608 
2609 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2610 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2611 
2612 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2613 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2614 	CU_ASSERT(rc == 0);
2615 
2616 	spdk_delay_us(1000);
2617 	poll_threads();
2618 
2619 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2620 
2621 	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
2622 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2623 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);
2624 
2625 	ctrid = TAILQ_NEXT(ctrid, link);
2626 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2627 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);
2628 
2629 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2630 	CU_ASSERT(rc == 0);
2631 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2632 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2633 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2634 	}
2635 
2636 	/* path1 is currently in use and path3 is an alternative path.
2637 	 * If we remove path1, the active path changes to path3.
2638 	 */
2639 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
2640 	CU_ASSERT(rc == 0);
2641 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2642 	CU_ASSERT(nvme_ctrlr->resetting == true);
2643 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2644 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2645 	}
2646 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2647 
2648 	poll_threads();
2649 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2650 	poll_threads();
2651 
2652 	CU_ASSERT(nvme_ctrlr->resetting == false);
2653 
2654 	/* path3 is the current and only path. If we remove path3, the corresponding
2655 	 * nvme_ctrlr is removed.
2656 	 */
2657 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2658 	CU_ASSERT(rc == 0);
2659 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2660 
2661 	poll_threads();
2662 	spdk_delay_us(1000);
2663 	poll_threads();
2664 
2665 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2666 
2667 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2668 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2669 
2670 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2671 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2672 	CU_ASSERT(rc == 0);
2673 
2674 	spdk_delay_us(1000);
2675 	poll_threads();
2676 
2677 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2678 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2679 
2680 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2681 
2682 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2683 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2684 
2685 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2686 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
2687 	CU_ASSERT(rc == 0);
2688 
2689 	spdk_delay_us(1000);
2690 	poll_threads();
2691 
2692 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2693 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2694 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2695 			break;
2696 		}
2697 	}
2698 	CU_ASSERT(ctrid != NULL);
2699 
2700 	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2701 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2702 	CU_ASSERT(rc == 0);
2703 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2704 
2705 	poll_threads();
2706 	spdk_delay_us(1000);
2707 	poll_threads();
2708 
2709 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2710 }
2711 
2712 static void
2713 test_abort(void)
2714 {
2715 	struct spdk_nvme_transport_id trid = {};
2716 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
2717 	struct spdk_nvme_ctrlr *ctrlr;
2718 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
2719 	struct nvme_ctrlr *nvme_ctrlr;
2720 	const int STRING_SIZE = 32;
2721 	const char *attached_names[STRING_SIZE];
2722 	struct nvme_bdev *bdev;
2723 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2724 	struct spdk_io_channel *ch1, *ch2;
2725 	struct nvme_bdev_channel *nbdev_ch1;
2726 	struct nvme_io_path *io_path1;
2727 	struct nvme_qpair *nvme_qpair1;
2728 	int rc;
2729 
2730 	/* Create the ctrlr on thread 1 and submit I/O and admin requests to be aborted on
2731 	 * thread 0. Abort requests for I/O are submitted on thread 0, and abort requests
2732 	 * for admin commands are submitted on thread 1. Both should succeed.
2733 	 */
2734 
2735 	ut_init_trid(&trid);
2736 
2737 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2738 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2739 
2740 	g_ut_attach_ctrlr_status = 0;
2741 	g_ut_attach_bdev_count = 1;
2742 
2743 	set_thread(1);
2744 
2745 	opts.ctrlr_loss_timeout_sec = -1;
2746 	opts.reconnect_delay_sec = 1;
2747 	opts.multipath = false;
2748 
2749 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2750 				   attach_ctrlr_done, NULL, &dopts, &opts);
2751 	CU_ASSERT(rc == 0);
2752 
2753 	spdk_delay_us(1000);
2754 	poll_threads();
2755 
2756 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2757 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2758 
2759 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2760 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2761 
2762 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2763 	ut_bdev_io_set_buf(write_io);
2764 
2765 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2766 	ut_bdev_io_set_buf(fuse_io);
2767 
2768 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2769 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2770 
2771 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2772 
2773 	set_thread(0);
2774 
2775 	ch1 = spdk_get_io_channel(bdev);
2776 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2777 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2778 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2779 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2780 	nvme_qpair1 = io_path1->qpair;
2781 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2782 
2783 	set_thread(1);
2784 
2785 	ch2 = spdk_get_io_channel(bdev);
2786 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2787 
2788 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2789 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2790 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2791 
2792 	/* Aborting the already completed request should fail. */
2793 	write_io->internal.f.in_submit_request = true;
2794 	bdev_nvme_submit_request(ch1, write_io);
2795 	poll_threads();
2796 
2797 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2798 
2799 	abort_io->u.abort.bio_to_abort = write_io;
2800 	abort_io->internal.f.in_submit_request = true;
2801 
2802 	bdev_nvme_submit_request(ch1, abort_io);
2803 
2804 	poll_threads();
2805 
2806 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2807 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2808 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2809 
2810 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2811 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2812 
2813 	admin_io->internal.f.in_submit_request = true;
2814 	bdev_nvme_submit_request(ch1, admin_io);
2815 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2816 	poll_threads();
2817 
2818 	CU_ASSERT(admin_io->internal.f.in_submit_request == false);
2819 
2820 	abort_io->u.abort.bio_to_abort = admin_io;
2821 	abort_io->internal.f.in_submit_request = true;
2822 
2823 	bdev_nvme_submit_request(ch2, abort_io);
2824 
2825 	poll_threads();
2826 
2827 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2828 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2829 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2830 
2831 	/* Aborting the write request should succeed. */
2832 	write_io->internal.f.in_submit_request = true;
2833 	bdev_nvme_submit_request(ch1, write_io);
2834 
2835 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
2836 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2837 
2838 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2839 	abort_io->u.abort.bio_to_abort = write_io;
2840 	abort_io->internal.f.in_submit_request = true;
2841 
2842 	bdev_nvme_submit_request(ch1, abort_io);
2843 
2844 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2845 	poll_threads();
2846 
2847 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2848 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2849 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2850 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2851 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2852 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2853 
2854 	/* Aborting the fused request should succeed. */
2855 	fuse_io->internal.f.in_submit_request = true;
2856 	bdev_nvme_submit_request(ch1, fuse_io);
2857 
2858 	CU_ASSERT(fuse_io->internal.f.in_submit_request == true);
2859 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2860 
2861 	abort_io->u.abort.bio_to_abort = fuse_io;
2862 	abort_io->internal.f.in_submit_request = true;
2863 
2864 	bdev_nvme_submit_request(ch1, abort_io);
2865 
2866 	spdk_delay_us(10000);
2867 	poll_threads();
2868 
2869 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2870 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2871 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2872 	CU_ASSERT(fuse_io->internal.f.in_submit_request == false);
2873 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2874 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2875 
2876 	/* Aborting the admin request should succeed. */
2877 	admin_io->internal.f.in_submit_request = true;
2878 	bdev_nvme_submit_request(ch1, admin_io);
2879 
2880 	CU_ASSERT(admin_io->internal.f.in_submit_request == true);
2881 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2882 
2883 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2884 	abort_io->u.abort.bio_to_abort = admin_io;
2885 	abort_io->internal.f.in_submit_request = true;
2886 
2887 	bdev_nvme_submit_request(ch2, abort_io);
2888 
2889 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2890 	poll_threads();
2891 
2892 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2893 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2894 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2895 	CU_ASSERT(admin_io->internal.f.in_submit_request == false);
2896 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2897 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2898 
2899 	set_thread(0);
2900 
2901 	/* If the qpair is disconnected, it is freed and then reconnected by resetting
2902 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
2903 	 * while the nvme_ctrlr is resetting.
2904 	 */
2905 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2906 
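	/* Three poller iterations detect the qpair failure, free the qpair, and
	 * start resetting the ctrlr.
	 */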
2907 	poll_thread_times(0, 3);
2908 
2909 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2910 	CU_ASSERT(nvme_ctrlr->resetting == true);
2911 
2912 	write_io->internal.f.in_submit_request = true;
2913 
2914 	bdev_nvme_submit_request(ch1, write_io);
2915 
2916 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
2917 	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
2918 
2919 	/* Aborting the queued write request should succeed immediately. */
2920 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2921 	abort_io->u.abort.bio_to_abort = write_io;
2922 	abort_io->internal.f.in_submit_request = true;
2923 
2924 	bdev_nvme_submit_request(ch1, abort_io);
2925 
2926 	CU_ASSERT(abort_io->internal.f.in_submit_request == false);
2927 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2928 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2929 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
2930 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2931 
2932 	poll_threads();
2933 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2934 	poll_threads();
2935 
2936 	spdk_put_io_channel(ch1);
2937 
2938 	set_thread(1);
2939 
2940 	spdk_put_io_channel(ch2);
2941 
2942 	poll_threads();
2943 
2944 	free(write_io);
2945 	free(fuse_io);
2946 	free(admin_io);
2947 	free(abort_io);
2948 
2949 	set_thread(1);
2950 
2951 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2952 	CU_ASSERT(rc == 0);
2953 
2954 	poll_threads();
2955 	spdk_delay_us(1000);
2956 	poll_threads();
2957 
2958 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2959 }
2960 
2961 static void
2962 test_get_io_qpair(void)
2963 {
2964 	struct spdk_nvme_transport_id trid = {};
2965 	struct spdk_nvme_ctrlr ctrlr = {};
2966 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2967 	struct spdk_io_channel *ch;
2968 	struct nvme_ctrlr_channel *ctrlr_ch;
2969 	struct spdk_nvme_qpair *qpair;
2970 	int rc;
2971 
2972 	ut_init_trid(&trid);
2973 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2974 
2975 	set_thread(0);
2976 
2977 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2978 	CU_ASSERT(rc == 0);
2979 
2980 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2981 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2982 
2983 	ch = spdk_get_io_channel(nvme_ctrlr);
2984 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2985 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2986 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2987 
2988 	qpair = bdev_nvme_get_io_qpair(ch);
2989 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2990 
2991 	spdk_put_io_channel(ch);
2992 
2993 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2994 	CU_ASSERT(rc == 0);
2995 
2996 	poll_threads();
2997 	spdk_delay_us(1000);
2998 	poll_threads();
2999 
3000 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3001 }
3002 
3003 /* Test a scenario in which the bdev subsystem starts shutting down while NVMe
3004  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first.
3005  * Add a test case to avoid regression for this scenario. spdk_bdev_unregister()
3006  * eventually calls bdev_nvme_destruct(), so call bdev_nvme_destruct() directly.
3007  */
3008 static void
3009 test_bdev_unregister(void)
3010 {
3011 	struct spdk_nvme_transport_id trid = {};
3012 	struct spdk_nvme_ctrlr *ctrlr;
3013 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3014 	struct nvme_ctrlr *nvme_ctrlr;
3015 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3016 	const int STRING_SIZE = 32;
3017 	const char *attached_names[STRING_SIZE];
3018 	struct nvme_bdev *bdev1, *bdev2;
3019 	int rc;
3020 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3021 
3022 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3023 	bdev_opts.multipath = false;
3024 
3025 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3026 	ut_init_trid(&trid);
3027 
3028 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
3029 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3030 
3031 	g_ut_attach_ctrlr_status = 0;
3032 	g_ut_attach_bdev_count = 2;
3033 
3034 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3035 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3036 	CU_ASSERT(rc == 0);
3037 
3038 	spdk_delay_us(1000);
3039 	poll_threads();
3040 
3041 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3042 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3043 
3044 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
3045 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3046 
3047 	bdev1 = nvme_ns1->bdev;
3048 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3049 
3050 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
3051 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3052 
3053 	bdev2 = nvme_ns2->bdev;
3054 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3055 
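	/* Simulate spdk_bdev_unregister() by calling bdev_nvme_destruct() directly. */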
3056 	bdev_nvme_destruct(&bdev1->disk);
3057 	bdev_nvme_destruct(&bdev2->disk);
3058 
3059 	poll_threads();
3060 
3061 	CU_ASSERT(nvme_ns1->bdev == NULL);
3062 	CU_ASSERT(nvme_ns2->bdev == NULL);
3063 
3064 	nvme_ctrlr->destruct = true;
3065 	_nvme_ctrlr_destruct(nvme_ctrlr);
3066 
3067 	poll_threads();
3068 	spdk_delay_us(1000);
3069 	poll_threads();
3070 
3071 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3072 }
3073 
3074 static void
3075 test_compare_ns(void)
3076 {
3077 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
3078 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
3079 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
3080 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
3081 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
3082 
3083 	/* No IDs are defined. */
3084 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3085 
3086 	/* Only EUI64s are defined and they do not match. */
3087 	nsdata1.eui64 = 0xABCDEF0123456789;
3088 	nsdata2.eui64 = 0xBBCDEF0123456789;
3089 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3090 
3091 	/* Only EUI64s are defined and they match. */
3092 	nsdata2.eui64 = 0xABCDEF0123456789;
3093 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3094 
3095 	/* Only NGUIDs are defined and they do not match. */
3096 	nsdata1.eui64 = 0x0;
3097 	nsdata2.eui64 = 0x0;
3098 	nsdata1.nguid[0] = 0x12;
3099 	nsdata2.nguid[0] = 0x10;
3100 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3101 
3102 	/* Only NGUIDs are defined and they match. */
3103 	nsdata2.nguid[0] = 0x12;
3104 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3105 
3106 	/* Only UUIDs are defined and they do not match. */
3107 	nsdata1.nguid[0] = 0x0;
3108 	nsdata2.nguid[0] = 0x0;
3109 	ns1.uuid = &uuid1;
3110 	ns2.uuid = &uuid2;
3111 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3112 
3113 	/* Only one UUID is defined. */
3114 	ns1.uuid = NULL;
3115 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3116 
3117 	/* Only UUIDs are defined and they match. */
3118 	ns1.uuid = &uuid2;
3119 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3120 
3121 	/* EUI64, NGUID, and UUID are all defined and they all match. */
3122 	nsdata1.eui64 = 0x123456789ABCDEF;
3123 	nsdata2.eui64 = 0x123456789ABCDEF;
3124 	nsdata1.nguid[15] = 0x34;
3125 	nsdata2.nguid[15] = 0x34;
3126 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3127 
3128 	/* The CSIs do not match. */
3129 	ns1.csi = SPDK_NVME_CSI_ZNS;
3130 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3131 }
3132 
3133 static void
3134 test_init_ana_log_page(void)
3135 {
3136 	struct spdk_nvme_transport_id trid = {};
3137 	struct spdk_nvme_ctrlr *ctrlr;
3138 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3139 	struct nvme_ctrlr *nvme_ctrlr;
3140 	const int STRING_SIZE = 32;
3141 	const char *attached_names[STRING_SIZE];
3142 	int rc;
3143 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3144 
3145 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3146 	bdev_opts.multipath = false;
3147 
3148 	set_thread(0);
3149 
3150 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3151 	ut_init_trid(&trid);
3152 
3153 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
3154 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3155 
3156 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3157 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3158 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3159 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3160 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3161 
3162 	g_ut_attach_ctrlr_status = 0;
3163 	g_ut_attach_bdev_count = 5;
3164 
3165 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3166 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3167 	CU_ASSERT(rc == 0);
3168 
3169 	spdk_delay_us(1000);
3170 	poll_threads();
3171 
3172 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3173 	poll_threads();
3174 
3175 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3176 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3177 
3178 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
3179 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
3180 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
3181 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
3182 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
3183 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3184 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3185 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3186 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
3187 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
3188 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
3189 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
3190 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
3191 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
3192 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
3193 
3194 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3195 	CU_ASSERT(rc == 0);
3196 
3197 	poll_threads();
3198 	spdk_delay_us(1000);
3199 	poll_threads();
3200 
3201 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3202 }
3203 
3204 static void
3205 init_accel(void)
3206 {
3207 	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3208 				sizeof(int), "accel_p");
3209 }
3210 
3211 static void
3212 fini_accel(void)
3213 {
3214 	spdk_io_device_unregister(g_accel_p, NULL);
3215 }
3216 
3217 static void
3218 test_get_memory_domains(void)
3219 {
3220 	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3221 	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3222 	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3223 	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3224 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3225 	struct spdk_memory_domain *domains[4] = {};
3226 	int rc = 0;
3227 
3228 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3229 
3230 	/* nvme controller doesn't have memory domains */
3231 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3232 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3233 	CU_ASSERT(rc == 0);
3234 	CU_ASSERT(domains[0] == NULL);
3235 	CU_ASSERT(domains[1] == NULL);
3236 
3237 	/* nvme controller has a memory domain */
3238 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3239 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3240 	CU_ASSERT(rc == 1);
3241 	CU_ASSERT(domains[0] != NULL);
3242 	memset(domains, 0, sizeof(domains));
3243 
3244 	/* multipath, 2 controllers report 1 memory domain each */
3245 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3246 
3247 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3248 	CU_ASSERT(rc == 2);
3249 	CU_ASSERT(domains[0] != NULL);
3250 	CU_ASSERT(domains[1] != NULL);
3251 	memset(domains, 0, sizeof(domains));
3252 
3253 	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3254 	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3255 	CU_ASSERT(rc == 2);
3256 
3257 	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3258 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3259 	CU_ASSERT(rc == 2);
3260 	CU_ASSERT(domains[0] == NULL);
3261 	CU_ASSERT(domains[1] == NULL);
3262 
3263 	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3264 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3265 	CU_ASSERT(rc == 2);
3266 	CU_ASSERT(domains[0] != NULL);
3267 	CU_ASSERT(domains[1] == NULL);
3268 	memset(domains, 0, sizeof(domains));
3269 
3270 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3271 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3272 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3273 	CU_ASSERT(rc == 4);
3274 	CU_ASSERT(domains[0] != NULL);
3275 	CU_ASSERT(domains[1] != NULL);
3276 	CU_ASSERT(domains[2] != NULL);
3277 	CU_ASSERT(domains[3] != NULL);
3278 	memset(domains, 0, sizeof(domains));
3279 
3280 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test).
3281 	 * The array size is less than the number of memory domains. */
3282 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3283 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3284 	CU_ASSERT(rc == 4);
3285 	CU_ASSERT(domains[0] != NULL);
3286 	CU_ASSERT(domains[1] != NULL);
3287 	CU_ASSERT(domains[2] != NULL);
3288 	CU_ASSERT(domains[3] == NULL);
3289 	memset(domains, 0, sizeof(domains));
3290 
3291 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3292 }
3293 
3294 static void
3295 test_reconnect_qpair(void)
3296 {
3297 	struct spdk_nvme_transport_id trid = {};
3298 	struct spdk_nvme_ctrlr *ctrlr;
3299 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3300 	struct nvme_ctrlr *nvme_ctrlr;
3301 	const int STRING_SIZE = 32;
3302 	const char *attached_names[STRING_SIZE];
3303 	struct nvme_bdev *bdev;
3304 	struct spdk_io_channel *ch1, *ch2;
3305 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3306 	struct nvme_io_path *io_path1, *io_path2;
3307 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3308 	int rc;
3309 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3310 
3311 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3312 	bdev_opts.multipath = false;
3313 
3314 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3315 	ut_init_trid(&trid);
3316 
3317 	set_thread(0);
3318 
3319 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3320 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3321 
3322 	g_ut_attach_ctrlr_status = 0;
3323 	g_ut_attach_bdev_count = 1;
3324 
3325 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3326 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3327 	CU_ASSERT(rc == 0);
3328 
3329 	spdk_delay_us(1000);
3330 	poll_threads();
3331 
3332 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3333 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3334 
3335 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3336 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3337 
3338 	ch1 = spdk_get_io_channel(bdev);
3339 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3340 
3341 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3342 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3343 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3344 	nvme_qpair1 = io_path1->qpair;
3345 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3346 
3347 	set_thread(1);
3348 
3349 	ch2 = spdk_get_io_channel(bdev);
3350 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3351 
3352 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3353 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3354 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3355 	nvme_qpair2 = io_path2->qpair;
3356 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3357 
3358 	/* If a qpair is disconnected, it is freed and then reconnected via
3359 	 * resetting the corresponding nvme_ctrlr.
3360 	 */
3361 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3362 	ctrlr->is_failed = true;
3363 
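	/* Step the pollers one at a time to walk the reset state machine: thread 1
	 * detects the failed qpair and starts the reset, then thread 0 frees its own
	 * qpair.
	 */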
3364 	poll_thread_times(1, 3);
3365 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3366 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3367 	CU_ASSERT(nvme_ctrlr->resetting == true);
3368 
3369 	poll_thread_times(0, 3);
3370 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3371 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3372 	CU_ASSERT(ctrlr->is_failed == true);
3373 
3374 	poll_thread_times(1, 2);
3375 	poll_thread_times(0, 1);
3376 	CU_ASSERT(ctrlr->is_failed == false);
3377 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3378 
3379 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3380 	poll_thread_times(0, 2);
3381 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3382 
3383 	poll_thread_times(0, 1);
3384 	poll_thread_times(1, 1);
3385 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3386 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3387 	CU_ASSERT(nvme_ctrlr->resetting == true);
3388 
3389 	poll_thread_times(0, 2);
3390 	poll_thread_times(1, 1);
3391 	poll_thread_times(0, 1);
3392 	CU_ASSERT(nvme_ctrlr->resetting == false);
3393 
3394 	poll_threads();
3395 
3396 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3397 	 * fails, the qpair is just freed.
3398 	 */
3399 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3400 	ctrlr->is_failed = true;
3401 	ctrlr->fail_reset = true;
3402 
3403 	poll_thread_times(1, 3);
3404 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3405 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3406 	CU_ASSERT(nvme_ctrlr->resetting == true);
3407 
3408 	poll_thread_times(0, 3);
3409 	poll_thread_times(1, 1);
3410 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3411 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3412 	CU_ASSERT(ctrlr->is_failed == true);
3413 
3414 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3415 	poll_thread_times(0, 3);
3416 	poll_thread_times(1, 1);
3417 	poll_thread_times(0, 1);
3418 	CU_ASSERT(ctrlr->is_failed == true);
3419 	CU_ASSERT(nvme_ctrlr->resetting == false);
3420 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3421 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3422 
3423 	poll_threads();
3424 
3425 	spdk_put_io_channel(ch2);
3426 
3427 	set_thread(0);
3428 
3429 	spdk_put_io_channel(ch1);
3430 
3431 	poll_threads();
3432 
3433 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3434 	CU_ASSERT(rc == 0);
3435 
3436 	poll_threads();
3437 	spdk_delay_us(1000);
3438 	poll_threads();
3439 
3440 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3441 }
3442 
3443 static void
3444 test_create_bdev_ctrlr(void)
3445 {
3446 	struct nvme_path_id path1 = {}, path2 = {};
3447 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3448 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3449 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3450 	const int STRING_SIZE = 32;
3451 	const char *attached_names[STRING_SIZE];
3452 	int rc;
3453 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3454 
3455 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3456 	bdev_opts.multipath = true;
3457 
3458 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3459 	ut_init_trid(&path1.trid);
3460 	ut_init_trid2(&path2.trid);
3461 
3462 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3463 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3464 
3465 	g_ut_attach_ctrlr_status = 0;
3466 	g_ut_attach_bdev_count = 0;
3467 
3468 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3469 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3470 	CU_ASSERT(rc == 0);
3471 
3472 	spdk_delay_us(1000);
3473 	poll_threads();
3474 
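	/* Advance past the admin queue poll period so the admin queue poller runs. */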
3475 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3476 	poll_threads();
3477 
3478 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3479 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3480 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3481 
3482 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3483 	g_ut_attach_ctrlr_status = -EINVAL;
3484 
3485 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3486 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3487 
3488 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3489 
3490 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3491 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3492 	CU_ASSERT(rc == 0);
3493 
3494 	spdk_delay_us(1000);
3495 	poll_threads();
3496 
3497 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3498 	poll_threads();
3499 
3500 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3501 
3502 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3503 	g_ut_attach_ctrlr_status = 0;
3504 
3505 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3506 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3507 
3508 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3509 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3510 	CU_ASSERT(rc == 0);
3511 
3512 	spdk_delay_us(1000);
3513 	poll_threads();
3514 
3515 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3516 	poll_threads();
3517 
3518 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3519 
3520 	/* Delete two ctrlrs at once. */
3521 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3522 	CU_ASSERT(rc == 0);
3523 
3524 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3525 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3526 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3527 
3528 	poll_threads();
3529 	spdk_delay_us(1000);
3530 	poll_threads();
3531 
3532 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3533 
3534 	/* Add two ctrlrs and delete one by one. */
3535 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3536 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3537 
3538 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3539 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3540 
3541 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3542 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3543 	CU_ASSERT(rc == 0);
3544 
3545 	spdk_delay_us(1000);
3546 	poll_threads();
3547 
3548 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3549 	poll_threads();
3550 
3551 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3552 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3553 	CU_ASSERT(rc == 0);
3554 
3555 	spdk_delay_us(1000);
3556 	poll_threads();
3557 
3558 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3559 	poll_threads();
3560 
3561 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3562 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3563 
3564 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3565 	CU_ASSERT(rc == 0);
3566 
3567 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3568 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
3569 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3570 
3571 	poll_threads();
3572 	spdk_delay_us(1000);
3573 	poll_threads();
3574 
3575 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3576 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3577 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3578 
3579 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3580 	CU_ASSERT(rc == 0);
3581 
3582 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3583 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3584 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);
3585 
3586 	poll_threads();
3587 	spdk_delay_us(1000);
3588 	poll_threads();
3589 
3590 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3591 }
3592 
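/* Return the nvme_ns of the given nvme_bdev that is associated with the given nvme_ctrlr. */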
3593 static struct nvme_ns *
3594 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3595 {
3596 	struct nvme_ns *nvme_ns;
3597 
3598 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3599 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3600 			return nvme_ns;
3601 		}
3602 	}
3603 
3604 	return NULL;
3605 }
3606 
3607 static void
3608 test_add_multi_ns_to_bdev(void)
3609 {
3610 	struct nvme_path_id path1 = {}, path2 = {};
3611 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3612 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3613 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3614 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3615 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3616 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3617 	const int STRING_SIZE = 32;
3618 	const char *attached_names[STRING_SIZE];
3619 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3620 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3621 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3622 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3623 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3624 	int rc;
3625 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3626 
3627 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3628 	bdev_opts.multipath = true;
3629 
3630 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3631 	ut_init_trid(&path1.trid);
3632 	ut_init_trid2(&path2.trid);
3633 
3634 	/* Create nvme_bdevs, some of which have namespaces shared between the two ctrlrs. */
3635 
3636 	/* Attach the 1st ctrlr, whose max number of namespaces is 5, and whose 1st, 3rd,
3637 	 * and 4th namespaces are populated.
3638 	 */
3639 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3640 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3641 
3642 	ctrlr1->ns[1].is_active = false;
3643 	ctrlr1->ns[4].is_active = false;
3644 	ctrlr1->ns[0].uuid = &uuid1;
3645 	ctrlr1->ns[2].uuid = &uuid3;
3646 	ctrlr1->ns[3].uuid = &uuid4;
3647 
3648 	g_ut_attach_ctrlr_status = 0;
3649 	g_ut_attach_bdev_count = 3;
3650 
3651 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3652 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3653 	CU_ASSERT(rc == 0);
3654 
3655 	spdk_delay_us(1000);
3656 	poll_threads();
3657 
3658 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3659 	poll_threads();
3660 
3661 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5, and whose 1st, 2nd,
3662 	 * and 4th namespaces are populated. The UUID of the 4th namespace is different,
3663 	 * and hence adding the 4th namespace to the existing bdev should fail.
3664 	 */
3665 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3666 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3667 
3668 	ctrlr2->ns[2].is_active = false;
3669 	ctrlr2->ns[4].is_active = false;
3670 	ctrlr2->ns[0].uuid = &uuid1;
3671 	ctrlr2->ns[1].uuid = &uuid2;
3672 	ctrlr2->ns[3].uuid = &uuid44;
3673 
3674 	g_ut_attach_ctrlr_status = 0;
3675 	g_ut_attach_bdev_count = 2;
3676 
3677 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3678 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3679 	CU_ASSERT(rc == 0);
3680 
3681 	spdk_delay_us(1000);
3682 	poll_threads();
3683 
3684 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3685 	poll_threads();
3686 
3687 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3688 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3689 
3690 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3691 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3692 
3693 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3694 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3695 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3696 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3697 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3698 
3699 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3700 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3701 
3702 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3703 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3704 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3705 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3706 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3707 
3708 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3709 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3710 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3711 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3712 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3713 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3714 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3715 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3716 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3717 
3718 	CU_ASSERT(bdev1->ref == 2);
3719 	CU_ASSERT(bdev2->ref == 1);
3720 	CU_ASSERT(bdev3->ref == 1);
3721 	CU_ASSERT(bdev4->ref == 1);
3722 
3723 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3724 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3725 	CU_ASSERT(rc == 0);
3726 
3727 	poll_threads();
3728 	spdk_delay_us(1000);
3729 	poll_threads();
3730 
3731 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3732 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
3733 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2);
3734 
3735 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3736 	CU_ASSERT(rc == 0);
3737 
3738 	poll_threads();
3739 	spdk_delay_us(1000);
3740 	poll_threads();
3741 
3742 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3743 
3744 	/* Test if an nvme_bdev that has a namespace shared between two ctrlrs
3745 	 * can be deleted when the bdev subsystem shuts down.
3746 	 */
3747 	g_ut_attach_bdev_count = 1;
3748 
3749 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3750 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3751 
3752 	ctrlr1->ns[0].uuid = &uuid1;
3753 
3754 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3755 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3756 	CU_ASSERT(rc == 0);
3757 
3758 	spdk_delay_us(1000);
3759 	poll_threads();
3760 
3761 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3762 	poll_threads();
3763 
3764 	ut_init_trid2(&path2.trid);
3765 
3766 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3767 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3768 
3769 	ctrlr2->ns[0].uuid = &uuid1;
3770 
3771 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3772 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3773 	CU_ASSERT(rc == 0);
3774 
3775 	spdk_delay_us(1000);
3776 	poll_threads();
3777 
3778 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3779 	poll_threads();
3780 
3781 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3782 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3783 
3784 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3785 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3786 
3787 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3788 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3789 
3790 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3791 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3792 
3793 	/* Check if the nvme_bdev has two nvme_ns. */
3794 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3795 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3796 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3797 
3798 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3799 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3800 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3801 
3802 	/* Delete the nvme_bdev first, as is done when the bdev subsystem shuts down. */
3803 	bdev_nvme_destruct(&bdev1->disk);
3804 
3805 	poll_threads();
3806 
3807 	CU_ASSERT(nvme_ns1->bdev == NULL);
3808 	CU_ASSERT(nvme_ns2->bdev == NULL);
3809 
3810 	nvme_ctrlr1->destruct = true;
3811 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3812 
3813 	poll_threads();
3814 	spdk_delay_us(1000);
3815 	poll_threads();
3816 
3817 	nvme_ctrlr2->destruct = true;
3818 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3819 
3820 	poll_threads();
3821 	spdk_delay_us(1000);
3822 	poll_threads();
3823 
3824 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3825 }
3826 
3827 static void
3828 test_add_multi_io_paths_to_nbdev_ch(void)
3829 {
3830 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3831 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3832 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3833 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3834 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3835 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3836 	const int STRING_SIZE = 32;
3837 	const char *attached_names[STRING_SIZE];
3838 	struct nvme_bdev *bdev;
3839 	struct spdk_io_channel *ch;
3840 	struct nvme_bdev_channel *nbdev_ch;
3841 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3842 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3843 	int rc;
3844 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3845 
3846 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3847 	bdev_opts.multipath = true;
3848 
3849 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3850 	ut_init_trid(&path1.trid);
3851 	ut_init_trid2(&path2.trid);
3852 	ut_init_trid3(&path3.trid);
3853 	g_ut_attach_ctrlr_status = 0;
3854 	g_ut_attach_bdev_count = 1;
3855 
3856 	set_thread(1);
3857 
3858 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3859 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3860 
3861 	ctrlr1->ns[0].uuid = &uuid1;
3862 
3863 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3864 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3865 	CU_ASSERT(rc == 0);
3866 
3867 	spdk_delay_us(1000);
3868 	poll_threads();
3869 
3870 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3871 	poll_threads();
3872 
3873 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3874 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3875 
3876 	ctrlr2->ns[0].uuid = &uuid1;
3877 
3878 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3879 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3880 	CU_ASSERT(rc == 0);
3881 
3882 	spdk_delay_us(1000);
3883 	poll_threads();
3884 
3885 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3886 	poll_threads();
3887 
3888 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3889 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3890 
3891 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
3892 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3893 
3894 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
3895 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3896 
3897 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3898 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3899 
3900 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3901 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3902 
3903 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3904 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3905 
3906 	set_thread(0);
3907 
3908 	ch = spdk_get_io_channel(bdev);
3909 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3910 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3911 
3912 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3913 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3914 
3915 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3916 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3917 
3918 	set_thread(1);
3919 
3920 	/* Check if an I/O path is dynamically added to the nvme_bdev_channel. */
3921 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3922 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3923 
3924 	ctrlr3->ns[0].uuid = &uuid1;
3925 
3926 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3927 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
3928 	CU_ASSERT(rc == 0);
3929 
3930 	spdk_delay_us(1000);
3931 	poll_threads();
3932 
3933 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3934 	poll_threads();
3935 
3936 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn);
3937 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3938 
3939 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3940 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3941 
3942 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3943 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3944 
3945 	/* Check if an I/O path is dynamically deleted from the nvme_bdev_channel. */
3946 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3947 	CU_ASSERT(rc == 0);
3948 
3949 	poll_threads();
3950 	spdk_delay_us(1000);
3951 	poll_threads();
3952 
3953 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1);
3954 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
3955 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3);
3956 
3957 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3958 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3959 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3960 
3961 	set_thread(0);
3962 
3963 	spdk_put_io_channel(ch);
3964 
3965 	poll_threads();
3966 
3967 	set_thread(1);
3968 
3969 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3970 	CU_ASSERT(rc == 0);
3971 
3972 	poll_threads();
3973 	spdk_delay_us(1000);
3974 	poll_threads();
3975 
3976 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3977 }
3978 
3979 static void
3980 test_admin_path(void)
3981 {
3982 	struct nvme_path_id path1 = {}, path2 = {};
3983 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3984 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3985 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3986 	const int STRING_SIZE = 32;
3987 	const char *attached_names[STRING_SIZE];
3988 	struct nvme_bdev *bdev;
3989 	struct spdk_io_channel *ch;
3990 	struct spdk_bdev_io *bdev_io;
3991 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3992 	int rc;
3993 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
3994 
3995 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
3996 	bdev_opts.multipath = true;
3997 
3998 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3999 	ut_init_trid(&path1.trid);
4000 	ut_init_trid2(&path2.trid);
4001 	g_ut_attach_ctrlr_status = 0;
4002 	g_ut_attach_bdev_count = 1;
4003 
4004 	set_thread(0);
4005 
4006 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4007 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4008 
4009 	ctrlr1->ns[0].uuid = &uuid1;
4010 
4011 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4012 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4013 	CU_ASSERT(rc == 0);
4014 
4015 	spdk_delay_us(1000);
4016 	poll_threads();
4017 
4018 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4019 	poll_threads();
4020 
4021 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4022 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4023 
4024 	ctrlr2->ns[0].uuid = &uuid1;
4025 
4026 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4027 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4028 	CU_ASSERT(rc == 0);
4029 
4030 	spdk_delay_us(1000);
4031 	poll_threads();
4032 
4033 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4034 	poll_threads();
4035 
4036 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4037 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4038 
4039 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4040 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4041 
4042 	ch = spdk_get_io_channel(bdev);
4043 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4044 
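	/* Build an NVMe admin passthrough request (Get Features) to exercise admin path selection. */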
4045 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
4046 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
4047 
4048 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
4049 	 * submitted to ctrlr2.
4050 	 */
4051 	ctrlr1->is_failed = true;
4052 	bdev_io->internal.f.in_submit_request = true;
4053 
4054 	bdev_nvme_submit_request(ch, bdev_io);
4055 
4056 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4057 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
4058 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4059 
4060 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4061 	poll_threads();
4062 
4063 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4064 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4065 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4066 
4067 	/* Both ctrlr1 and ctrlr2 are failed. The admin command fails to be submitted. */
4068 	ctrlr2->is_failed = true;
4069 	bdev_io->internal.f.in_submit_request = true;
4070 
4071 	bdev_nvme_submit_request(ch, bdev_io);
4072 
4073 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4074 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
4075 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4076 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4077 
4078 	free(bdev_io);
4079 
4080 	spdk_put_io_channel(ch);
4081 
4082 	poll_threads();
4083 
4084 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4085 	CU_ASSERT(rc == 0);
4086 
4087 	poll_threads();
4088 	spdk_delay_us(1000);
4089 	poll_threads();
4090 
4091 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4092 }
4093 
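/* Return the io_path in the given nvme_bdev_channel whose qpair belongs to the given nvme_ctrlr. */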
4094 static struct nvme_io_path *
4095 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
4096 			struct nvme_ctrlr *nvme_ctrlr)
4097 {
4098 	struct nvme_io_path *io_path;
4099 
4100 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
4101 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
4102 			return io_path;
4103 		}
4104 	}
4105 
4106 	return NULL;
4107 }
4108 
4109 static void
4110 test_reset_bdev_ctrlr(void)
4111 {
4112 	struct nvme_path_id path1 = {}, path2 = {};
4113 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4114 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4115 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4116 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4117 	struct nvme_path_id *curr_path1, *curr_path2;
4118 	const int STRING_SIZE = 32;
4119 	const char *attached_names[STRING_SIZE];
4120 	struct nvme_bdev *bdev;
4121 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
4122 	struct nvme_bdev_io *first_bio;
4123 	struct spdk_io_channel *ch1, *ch2;
4124 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
4125 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
4126 	int rc;
4127 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4128 
4129 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4130 	bdev_opts.multipath = true;
4131 
4132 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4133 	ut_init_trid(&path1.trid);
4134 	ut_init_trid2(&path2.trid);
4135 	g_ut_attach_ctrlr_status = 0;
4136 	g_ut_attach_bdev_count = 1;
4137 
4138 	set_thread(0);
4139 
4140 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4141 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4142 
4143 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4144 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4145 	CU_ASSERT(rc == 0);
4146 
4147 	spdk_delay_us(1000);
4148 	poll_threads();
4149 
4150 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4151 	poll_threads();
4152 
4153 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4154 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4155 
4156 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4157 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4158 	CU_ASSERT(rc == 0);
4159 
4160 	spdk_delay_us(1000);
4161 	poll_threads();
4162 
4163 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4164 	poll_threads();
4165 
4166 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4167 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4168 
4169 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4170 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
4171 
4172 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
4173 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
4174 
4175 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4176 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
4177 
4178 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
4179 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
4180 
4181 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4182 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4183 
4184 	set_thread(0);
4185 
4186 	ch1 = spdk_get_io_channel(bdev);
4187 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
4188 
4189 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
4190 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
4191 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
4192 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
4193 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
4194 
4195 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
4196 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
4197 
4198 	set_thread(1);
4199 
4200 	ch2 = spdk_get_io_channel(bdev);
4201 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
4202 
4203 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
4204 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
4205 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
4206 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
4207 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
4208 
4209 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
4210 
4211 	/* The first reset request from bdev_io is submitted on thread 0.
4212 	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
4213 	 *
4214 	 * A few extra polls are necessary after resetting ctrlr1 to check
4215 	 * pending reset requests for ctrlr1.
4216 	 */
4217 	ctrlr1->is_failed = true;
4218 	curr_path1->last_failed_tsc = spdk_get_ticks();
4219 	ctrlr2->is_failed = true;
4220 	curr_path2->last_failed_tsc = spdk_get_ticks();
4221 
4222 	set_thread(0);
4223 
4224 	bdev_nvme_submit_request(ch1, first_bdev_io);
4225 
4226 	poll_thread_times(0, 1);
4227 	poll_thread_times(1, 1);
4228 	poll_thread_times(0, 2);
4229 	poll_thread_times(1, 1);
4230 	poll_thread_times(0, 1);
4231 
4232 	CU_ASSERT(first_bio->io_path == io_path11);
4233 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4234 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4235 
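	/* While ctrlr1 is reset, its qpair on thread 0 is deleted first, then its qpair on thread 1. */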
4236 	poll_thread_times(0, 3);
4237 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4238 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4239 
4240 	poll_thread_times(1, 2);
4241 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4242 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4243 	CU_ASSERT(ctrlr1->is_failed == true);
4244 
4245 	poll_thread_times(0, 1);
4246 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4247 	CU_ASSERT(ctrlr1->is_failed == false);
4248 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4249 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4250 
4251 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4252 	poll_thread_times(0, 2);
4253 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4254 
4255 	poll_thread_times(0, 1);
4256 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4257 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4258 
4259 	poll_thread_times(1, 1);
4260 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4261 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4262 
4263 	poll_thread_times(0, 2);
4264 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4265 	poll_thread_times(1, 1);
4266 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4267 	poll_thread_times(0, 2);
4268 	CU_ASSERT(nvme_ctrlr1->resetting == false);
4269 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4270 	CU_ASSERT(first_bio->io_path == io_path12);
4271 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4272 
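	/* The reset then moves on to ctrlr2 and repeats the same sequence. */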
4273 	poll_thread_times(0, 3);
4274 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4275 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4276 
4277 	poll_thread_times(1, 2);
4278 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4279 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4280 	CU_ASSERT(ctrlr2->is_failed == true);
4281 
4282 	poll_thread_times(0, 1);
4283 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4284 	CU_ASSERT(ctrlr2->is_failed == false);
4285 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4286 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4287 
4288 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4289 	poll_thread_times(0, 2);
4290 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4291 
4292 	poll_thread_times(0, 1);
4293 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4294 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4295 
4296 	poll_thread_times(1, 2);
4297 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4298 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4299 
4300 	poll_thread_times(0, 2);
4301 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4302 	poll_thread_times(1, 1);
4303 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4304 	poll_thread_times(0, 2);
4305 	CU_ASSERT(first_bio->io_path == NULL);
4306 	CU_ASSERT(nvme_ctrlr2->resetting == false);
4307 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4308 
4309 	poll_threads();
4310 
4311 	/* There is a race between two reset requests from bdev_io.
4312 	 *
4313 	 * The first reset request is submitted on thread 0, and the second reset
4314 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4315 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
4316 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
4317 	 * The second is pending on ctrlr2 again. After the first completes resetting
4318 	 * ctrlr2, both complete successfully.
4319 	 */
4320 	ctrlr1->is_failed = true;
4321 	curr_path1->last_failed_tsc = spdk_get_ticks();
4322 	ctrlr2->is_failed = true;
4323 	curr_path2->last_failed_tsc = spdk_get_ticks();
4324 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4325 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4326 
4327 	set_thread(0);
4328 
4329 	bdev_nvme_submit_request(ch1, first_bdev_io);
4330 
4331 	set_thread(1);
4332 
4333 	bdev_nvme_submit_request(ch2, second_bdev_io);
4334 
4335 	poll_thread_times(0, 1);
4336 	poll_thread_times(1, 1);
4337 	poll_thread_times(0, 2);
4338 	poll_thread_times(1, 1);
4339 	poll_thread_times(0, 1);
4340 	poll_thread_times(1, 1);
4341 
4342 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4343 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4344 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4345 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4346 
4347 	poll_threads();
4348 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4349 	poll_threads();
4350 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4351 	poll_threads();
4352 
4353 	CU_ASSERT(ctrlr1->is_failed == false);
4354 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4355 	CU_ASSERT(ctrlr2->is_failed == false);
4356 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4357 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4358 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4359 
4360 	/* Reset of the first path succeeds, reset of the second path fails.
4361 	 * Since we have at least one working path, we should not fail the RESET IO.
4362 	 */
4363 	ctrlr1->is_failed = true;
4364 	curr_path1->last_failed_tsc = spdk_get_ticks();
4365 	ctrlr2->is_failed = true;
4366 	curr_path2->last_failed_tsc = spdk_get_ticks();
4367 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4368 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4369 
4370 	set_thread(0);
4371 	bdev_nvme_submit_request(ch1, first_bdev_io);
4372 
4373 	set_thread(1);
4374 	bdev_nvme_submit_request(ch2, second_bdev_io);
4375 
4376 	poll_thread_times(0, 1);
4377 	poll_thread_times(1, 1);
4378 	poll_thread_times(0, 2);
4379 	poll_thread_times(1, 1);
4380 	poll_thread_times(0, 1);
4381 	poll_thread_times(1, 1);
4382 
4383 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4384 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4385 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4386 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4387 
4388 	ctrlr2->fail_reset = true;
4389 
4390 	poll_threads();
4391 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4392 	poll_threads();
4393 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4394 	poll_threads();
4395 
4396 	CU_ASSERT(ctrlr1->is_failed == false);
4397 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4398 	CU_ASSERT(ctrlr2->is_failed == true);
4399 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4400 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4401 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4402 
4403 	/* Path 2 recovers */
4404 	ctrlr2->fail_reset = false;
4405 	poll_threads();
4406 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4407 	poll_threads();
4408 
4409 	CU_ASSERT(ctrlr2->is_failed == false);
4410 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4411 
4412 	/* Reset of the first path fails, reset of the second path succeeds.
4413 	 * Since we have at least one working path, we should not fail the RESET IO.
4414 	 */
4415 	ctrlr1->is_failed = true;
4416 	curr_path1->last_failed_tsc = spdk_get_ticks();
4417 	ctrlr2->is_failed = true;
4418 	curr_path2->last_failed_tsc = spdk_get_ticks();
4419 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4420 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4421 
4422 	set_thread(0);
4423 	bdev_nvme_submit_request(ch1, first_bdev_io);
4424 
4425 	set_thread(1);
4426 	bdev_nvme_submit_request(ch2, second_bdev_io);
4427 
4428 	poll_thread_times(0, 1);
4429 	poll_thread_times(1, 1);
4430 	poll_thread_times(0, 2);
4431 	poll_thread_times(1, 1);
4432 	poll_thread_times(0, 1);
4433 	poll_thread_times(1, 1);
4434 
4435 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4436 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4437 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4438 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4439 
4440 	ctrlr1->fail_reset = true;
4441 
4442 	poll_threads();
4443 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4444 	poll_threads();
4445 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4446 	poll_threads();
4447 
4448 	CU_ASSERT(ctrlr1->is_failed == true);
4449 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4450 	CU_ASSERT(ctrlr2->is_failed == false);
4451 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4452 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4453 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4454 
4455 	/* Path 1 recovers */
4456 	ctrlr1->fail_reset = false;
4457 	poll_threads();
4458 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4459 	poll_threads();
4460 
4461 	CU_ASSERT(ctrlr1->is_failed == false);
4462 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4463 
4464 	/* Resets of both paths fail.
4465 	 * Since we have no working paths, we should fail the RESET IO.
4466 	 */
4467 	ctrlr1->is_failed = true;
4468 	curr_path1->last_failed_tsc = spdk_get_ticks();
4469 	ctrlr2->is_failed = true;
4470 	curr_path2->last_failed_tsc = spdk_get_ticks();
4471 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4472 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4473 
4474 	set_thread(0);
4475 	bdev_nvme_submit_request(ch1, first_bdev_io);
4476 
4477 	set_thread(1);
4478 	bdev_nvme_submit_request(ch2, second_bdev_io);
4479 
4480 	poll_thread_times(0, 1);
4481 	poll_thread_times(1, 1);
4482 	poll_thread_times(0, 2);
4483 	poll_thread_times(1, 1);
4484 	poll_thread_times(0, 1);
4485 	poll_thread_times(1, 1);
4486 
4487 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4488 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4489 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
4490 		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);
4491 
4492 	ctrlr1->fail_reset = true;
4493 	ctrlr2->fail_reset = true;
4494 
4495 	poll_threads();
4496 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4497 	poll_threads();
4498 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4499 	poll_threads();
4500 
4501 	CU_ASSERT(ctrlr1->is_failed == true);
4502 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4503 	CU_ASSERT(ctrlr2->is_failed == true);
4504 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4505 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4506 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
4507 
4508 	/* Paths 1 and 2 recover */
4509 	ctrlr1->fail_reset = false;
4510 	ctrlr2->fail_reset = false;
4511 	poll_threads();
4512 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4513 	poll_threads();
4514 
4515 	CU_ASSERT(ctrlr1->is_failed == false);
4516 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4517 	CU_ASSERT(ctrlr2->is_failed == false);
4518 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4519 
4520 	/* Reset of the first path fails, reset of the second path succeeds.
4521 	 * Since we have at least one working path, we should not fail the RESET IO.
4522 	 *
4523 	 * Here, reset of the first path fails immediately because it is disabled.
4524 	 *
4525 	 * The purpose is to verify the fix for a bug in which bdev_io did not
4526 	 * hold an io_path when its reset failed immediately, and the continued
4527 	 * operation then caused a NULL pointer access.
4528 	 */
4529 	nvme_ctrlr1->disabled = true;
4530 	ctrlr1->is_failed = true;
4531 	curr_path1->last_failed_tsc = spdk_get_ticks();
4532 	ctrlr2->is_failed = true;
4533 	curr_path2->last_failed_tsc = spdk_get_ticks();
4534 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4535 
4536 	set_thread(0);
4537 	bdev_nvme_submit_request(ch1, first_bdev_io);
4538 
4539 	poll_threads();
4540 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4541 	poll_threads();
4542 
4543 	CU_ASSERT(ctrlr1->is_failed == true);
4544 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4545 	CU_ASSERT(ctrlr2->is_failed == false);
4546 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4547 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4548 
4549 	nvme_ctrlr1->disabled = false;
4550 	ctrlr1->is_failed = false;
4551 	curr_path1->last_failed_tsc = 0;
4552 
4553 	set_thread(0);
4554 
4555 	spdk_put_io_channel(ch1);
4556 
4557 	set_thread(1);
4558 
4559 	spdk_put_io_channel(ch2);
4560 
4561 	poll_threads();
4562 
4563 	set_thread(0);
4564 
4565 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4566 	CU_ASSERT(rc == 0);
4567 
4568 	poll_threads();
4569 	spdk_delay_us(1000);
4570 	poll_threads();
4571 
4572 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4573 
4574 	free(first_bdev_io);
4575 	free(second_bdev_io);
4576 }
4577 
4578 static void
4579 test_find_io_path(void)
4580 {
4581 	struct nvme_bdev_channel nbdev_ch = {
4582 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4583 	};
4584 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4585 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4586 	struct spdk_nvme_ns ns1 = {}, ns2 = {};
4587 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4588 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4589 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4590 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4591 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, };
4592 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4593 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4594 
4595 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4596 
4597 	/* Test if an io_path whose ANA state is not accessible is excluded. */
4598 
4599 	nvme_qpair1.qpair = &qpair1;
4600 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4601 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4602 
4603 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4604 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4605 
4606 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4607 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4608 
4609 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4610 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4611 
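	/* Clear the cached current_io_path so that the next find re-evaluates the paths. */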
4612 	nbdev_ch.current_io_path = NULL;
4613 
4614 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4615 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4616 
4617 	nbdev_ch.current_io_path = NULL;
4618 
4619 	/* Test if an io_path whose qpair is resetting is excluded. */
4620 
4621 	nvme_qpair1.qpair = NULL;
4622 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4623 
4624 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4625 
4626 	/* Test if the ANA optimized state, or the first found ANA non-optimized state,
4627 	 * is prioritized.
4628 	 */
4629 
4630 	nvme_qpair1.qpair = &qpair1;
4631 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4632 	nvme_qpair2.qpair = &qpair2;
4633 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4634 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4635 
4636 	nbdev_ch.current_io_path = NULL;
4637 
4638 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4639 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4640 
4641 	nbdev_ch.current_io_path = NULL;
4642 }
4643 
4644 static void
4645 test_retry_io_if_ana_state_is_updating(void)
4646 {
4647 	struct nvme_path_id path = {};
4648 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
4649 	struct spdk_nvme_ctrlr *ctrlr;
4650 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
4651 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4652 	struct nvme_ctrlr *nvme_ctrlr;
4653 	const int STRING_SIZE = 32;
4654 	const char *attached_names[STRING_SIZE];
4655 	struct nvme_bdev *bdev;
4656 	struct nvme_ns *nvme_ns;
4657 	struct spdk_bdev_io *bdev_io1;
4658 	struct spdk_io_channel *ch;
4659 	struct nvme_bdev_channel *nbdev_ch;
4660 	struct nvme_io_path *io_path;
4661 	struct nvme_qpair *nvme_qpair;
4662 	int rc;
4663 
4664 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4665 	ut_init_trid(&path.trid);
4666 
4667 	set_thread(0);
4668 
4669 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4670 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4671 
4672 	g_ut_attach_ctrlr_status = 0;
4673 	g_ut_attach_bdev_count = 1;
4674 
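	/* Reconnect indefinitely (ctrlr_loss_timeout_sec of -1), retrying once per second. */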
4675 	opts.ctrlr_loss_timeout_sec = -1;
4676 	opts.reconnect_delay_sec = 1;
4677 	opts.multipath = false;
4678 
4679 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4680 				   attach_ctrlr_done, NULL, &dopts, &opts);
4681 	CU_ASSERT(rc == 0);
4682 
4683 	spdk_delay_us(1000);
4684 	poll_threads();
4685 
4686 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4687 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4688 
4689 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
4690 	CU_ASSERT(nvme_ctrlr != NULL);
4691 
4692 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4693 	CU_ASSERT(bdev != NULL);
4694 
4695 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4696 	CU_ASSERT(nvme_ns != NULL);
4697 
4698 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4699 	ut_bdev_io_set_buf(bdev_io1);
4700 
4701 	ch = spdk_get_io_channel(bdev);
4702 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4703 
4704 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4705 
4706 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4707 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4708 
4709 	nvme_qpair = io_path->qpair;
4710 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4711 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4712 
4713 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4714 
4715 	/* If qpair is connected, I/O should succeed. */
4716 	bdev_io1->internal.f.in_submit_request = true;
4717 
4718 	bdev_nvme_submit_request(ch, bdev_io1);
4719 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4720 
4721 	poll_threads();
4722 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
4723 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4724 
4725 	/* If the ANA state of the namespace is inaccessible, the I/O should be queued. */
4726 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4727 	nbdev_ch->current_io_path = NULL;
4728 
4729 	bdev_io1->internal.f.in_submit_request = true;
4730 
4731 	bdev_nvme_submit_request(ch, bdev_io1);
4732 
4733 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4734 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4735 	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4736 
4737 	/* ANA state became accessible while I/O was queued. */
4738 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4739 
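	/* After the retry delay elapses, the queued I/O is resubmitted on the next poll. */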
4740 	spdk_delay_us(1000000);
4741 
4742 	poll_thread_times(0, 1);
4743 
4744 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4745 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
4746 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4747 
4748 	poll_threads();
4749 
4750 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4751 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
4752 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4753 
4754 	free(bdev_io1);
4755 
4756 	spdk_put_io_channel(ch);
4757 
4758 	poll_threads();
4759 
4760 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4761 	CU_ASSERT(rc == 0);
4762 
4763 	poll_threads();
4764 	spdk_delay_us(1000);
4765 	poll_threads();
4766 
4767 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4768 }
4769 
4770 static void
4771 test_retry_io_for_io_path_error(void)
4772 {
4773 	struct nvme_path_id path1 = {}, path2 = {};
4774 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4775 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4776 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4777 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4778 	const int STRING_SIZE = 32;
4779 	const char *attached_names[STRING_SIZE];
4780 	struct nvme_bdev *bdev;
4781 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4782 	struct spdk_bdev_io *bdev_io;
4783 	struct nvme_bdev_io *bio;
4784 	struct spdk_io_channel *ch;
4785 	struct nvme_bdev_channel *nbdev_ch;
4786 	struct nvme_io_path *io_path1, *io_path2;
4787 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4788 	struct ut_nvme_req *req;
4789 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4790 	int rc;
4791 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
4792 
4793 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
4794 	bdev_opts.multipath = true;
4795 
4796 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4797 	ut_init_trid(&path1.trid);
4798 	ut_init_trid2(&path2.trid);
4799 
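	/* Allow each failed I/O to be retried once. */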
4800 	g_opts.bdev_retry_count = 1;
4801 
4802 	set_thread(0);
4803 
4804 	g_ut_attach_ctrlr_status = 0;
4805 	g_ut_attach_bdev_count = 1;
4806 
4807 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4808 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4809 
4810 	ctrlr1->ns[0].uuid = &uuid1;
4811 
4812 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4813 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4814 	CU_ASSERT(rc == 0);
4815 
4816 	spdk_delay_us(1000);
4817 	poll_threads();
4818 
4819 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4820 	poll_threads();
4821 
4822 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4823 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4824 
4825 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
4826 	CU_ASSERT(nvme_ctrlr1 != NULL);
4827 
4828 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4829 	CU_ASSERT(bdev != NULL);
4830 
4831 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4832 	CU_ASSERT(nvme_ns1 != NULL);
4833 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4834 
4835 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4836 	ut_bdev_io_set_buf(bdev_io);
4837 
4838 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4839 
4840 	ch = spdk_get_io_channel(bdev);
4841 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4842 
4843 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4844 
4845 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4846 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4847 
4848 	nvme_qpair1 = io_path1->qpair;
4849 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4850 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4851 
4852 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4853 
4854 	/* The I/O got a temporary I/O path error, but it should not be retried if DNR is set. */
4855 	bdev_io->internal.f.in_submit_request = true;
4856 
4857 	bdev_nvme_submit_request(ch, bdev_io);
4858 
4859 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4860 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4861 
4862 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4863 	SPDK_CU_ASSERT_FATAL(req != NULL);
4864 
4865 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4866 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4867 	req->cpl.status.dnr = 1;
4868 
4869 	poll_thread_times(0, 1);
4870 
4871 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4872 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4873 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4874 
4875 	/* I/O got a temporary I/O path error, but it should succeed after retry. */
4876 	bdev_io->internal.f.in_submit_request = true;
4877 
4878 	bdev_nvme_submit_request(ch, bdev_io);
4879 
4880 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4881 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4882 
4883 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4884 	SPDK_CU_ASSERT_FATAL(req != NULL);
4885 
4886 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4887 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4888 
4889 	poll_thread_times(0, 1);
4890 
4891 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4892 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4893 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4894 
4895 	poll_threads();
4896 
4897 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4898 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4899 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4900 
4901 	/* Add io_path2 dynamically, and create a multipath configuration. */
4902 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4903 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4904 
4905 	ctrlr2->ns[0].uuid = &uuid1;
4906 
4907 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4908 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
4909 	CU_ASSERT(rc == 0);
4910 
4911 	spdk_delay_us(1000);
4912 	poll_threads();
4913 
4914 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4915 	poll_threads();
4916 
4917 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
4918 	CU_ASSERT(nvme_ctrlr2 != NULL);
4919 
4920 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4921 	CU_ASSERT(nvme_ns2 != NULL);
4922 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4923 
4924 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4925 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4926 
4927 	nvme_qpair2 = io_path2->qpair;
4928 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4929 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4930 
4931 	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4932 	 * and deleted, so the I/O is aborted. But io_path2 is available.
4933 	 * Hence, after a retry, the I/O is submitted to io_path2 and should succeed.
4934 	 */
4935 	bdev_io->internal.f.in_submit_request = true;
4936 
4937 	bdev_nvme_submit_request(ch, bdev_io);
4938 
4939 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4940 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4941 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4942 
4943 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4944 	SPDK_CU_ASSERT_FATAL(req != NULL);
4945 
4946 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4947 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4948 
4949 	poll_thread_times(0, 1);
4950 
4951 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4952 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4953 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
4954 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
4955 
4956 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4957 	nvme_qpair1->qpair = NULL;
4958 
4959 	poll_threads();
4960 
4961 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4962 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
4963 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4964 
4965 	free(bdev_io);
4966 
4967 	spdk_put_io_channel(ch);
4968 
4969 	poll_threads();
4970 
4971 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4972 	CU_ASSERT(rc == 0);
4973 
4974 	poll_threads();
4975 	spdk_delay_us(1000);
4976 	poll_threads();
4977 
4978 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4979 
4980 	g_opts.bdev_retry_count = 0;
4981 }
4982 
4983 static void
4984 test_retry_io_count(void)
4985 {
4986 	struct nvme_path_id path = {};
4987 	struct spdk_nvme_ctrlr *ctrlr;
4988 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
4989 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4990 	struct nvme_ctrlr *nvme_ctrlr;
4991 	const int STRING_SIZE = 32;
4992 	const char *attached_names[STRING_SIZE];
4993 	struct nvme_bdev *bdev;
4994 	struct nvme_ns *nvme_ns;
4995 	struct spdk_bdev_io *bdev_io;
4996 	struct nvme_bdev_io *bio;
4997 	struct spdk_io_channel *ch;
4998 	struct nvme_bdev_channel *nbdev_ch;
4999 	struct nvme_io_path *io_path;
5000 	struct nvme_qpair *nvme_qpair;
5001 	struct ut_nvme_req *req;
5002 	int rc;
5003 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5004 
5005 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5006 	bdev_opts.multipath = false;
5007 
5008 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5009 	ut_init_trid(&path.trid);
5010 
5011 	set_thread(0);
5012 
5013 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5014 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5015 
5016 	g_ut_attach_ctrlr_status = 0;
5017 	g_ut_attach_bdev_count = 1;
5018 
5019 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5020 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5021 	CU_ASSERT(rc == 0);
5022 
5023 	spdk_delay_us(1000);
5024 	poll_threads();
5025 
5026 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5027 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5028 
5029 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
5030 	CU_ASSERT(nvme_ctrlr != NULL);
5031 
5032 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5033 	CU_ASSERT(bdev != NULL);
5034 
5035 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5036 	CU_ASSERT(nvme_ns != NULL);
5037 
5038 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5039 	ut_bdev_io_set_buf(bdev_io);
5040 
5041 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5042 
5043 	ch = spdk_get_io_channel(bdev);
5044 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5045 
5046 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5047 
5048 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5049 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5050 
5051 	nvme_qpair = io_path->qpair;
5052 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5053 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5054 
5055 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5056 
5057 	/* If I/O is aborted by request, it should not be retried. */
5058 	g_opts.bdev_retry_count = 1;
5059 
5060 	bdev_io->internal.f.in_submit_request = true;
5061 
5062 	bdev_nvme_submit_request(ch, bdev_io);
5063 
5064 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5065 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5066 
5067 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5068 	SPDK_CU_ASSERT_FATAL(req != NULL);
5069 
5070 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
5071 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5072 
5073 	poll_thread_times(0, 1);
5074 
5075 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5076 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5077 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
5078 
5079 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
5080 	 * the failed I/O should not be retried.
5081 	 */
5082 	g_opts.bdev_retry_count = 4;
5083 
5084 	bdev_io->internal.f.in_submit_request = true;
5085 
5086 	bdev_nvme_submit_request(ch, bdev_io);
5087 
5088 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5089 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5090 
5091 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5092 	SPDK_CU_ASSERT_FATAL(req != NULL);
5093 
5094 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5095 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5096 	bio->retry_count = 4;
5097 
5098 	poll_thread_times(0, 1);
5099 
5100 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5101 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5102 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
5103 
5104 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
5105 	g_opts.bdev_retry_count = -1;
5106 
5107 	bdev_io->internal.f.in_submit_request = true;
5108 
5109 	bdev_nvme_submit_request(ch, bdev_io);
5110 
5111 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5112 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5113 
5114 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5115 	SPDK_CU_ASSERT_FATAL(req != NULL);
5116 
5117 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5118 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5119 	bio->retry_count = 4;
5120 
5121 	poll_thread_times(0, 1);
5122 
5123 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5124 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5125 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5126 
5127 	poll_threads();
5128 
5129 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5130 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5131 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5132 
5133 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
5134 	 * the failed I/O should be retried.
5135 	 */
5136 	g_opts.bdev_retry_count = 4;
5137 
5138 	bdev_io->internal.f.in_submit_request = true;
5139 
5140 	bdev_nvme_submit_request(ch, bdev_io);
5141 
5142 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5143 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5144 
5145 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5146 	SPDK_CU_ASSERT_FATAL(req != NULL);
5147 
5148 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
5149 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5150 	bio->retry_count = 3;
5151 
5152 	poll_thread_times(0, 1);
5153 
5154 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5155 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5156 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5157 
5158 	poll_threads();
5159 
5160 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5161 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5162 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5163 
5164 	free(bdev_io);
5165 
5166 	spdk_put_io_channel(ch);
5167 
5168 	poll_threads();
5169 
5170 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5171 	CU_ASSERT(rc == 0);
5172 
5173 	poll_threads();
5174 	spdk_delay_us(1000);
5175 	poll_threads();
5176 
5177 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5178 
5179 	g_opts.bdev_retry_count = 0;
5180 }
5181 
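/* Verify that only one ANA log page read is in flight per ctrlr at a time:
 * overlapping reads, including one from another thread, are rejected while
 * ana_log_page_updating is true, and reads are also rejected while the ctrlr
 * is resetting.
 */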
5182 static void
5183 test_concurrent_read_ana_log_page(void)
5184 {
5185 	struct spdk_nvme_transport_id trid = {};
5186 	struct spdk_nvme_ctrlr *ctrlr;
5187 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5188 	struct nvme_ctrlr *nvme_ctrlr;
5189 	const int STRING_SIZE = 32;
5190 	const char *attached_names[STRING_SIZE];
5191 	int rc;
5192 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5193 
5194 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5195 	bdev_opts.multipath = false;
5196 
5197 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5198 	ut_init_trid(&trid);
5199 
5200 	set_thread(0);
5201 
5202 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
5203 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5204 
5205 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5206 
5207 	g_ut_attach_ctrlr_status = 0;
5208 	g_ut_attach_bdev_count = 1;
5209 
5210 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
5211 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5212 	CU_ASSERT(rc == 0);
5213 
5214 	spdk_delay_us(1000);
5215 	poll_threads();
5216 
5217 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5218 	poll_threads();
5219 
5220 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5221 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5222 
5223 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5224 
5225 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5226 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5227 
5228 	/* A subsequent read request should be rejected. */
5229 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5230 
5231 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5232 
5233 	set_thread(1);
5234 
5235 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5236 
5237 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5238 
5239 	/* A reset request issued while reading the ANA log page should not be rejected. */
5240 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5241 	CU_ASSERT(rc == 0);
5242 
5243 	poll_threads();
5244 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5245 	poll_threads();
5246 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5247 	poll_threads();
5248 
5249 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5250 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5251 
5252 	/* Reading the ANA log page while resetting the ctrlr should be rejected. */
5253 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5254 	CU_ASSERT(rc == 0);
5255 
5256 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
5257 
5258 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5259 
5260 	poll_threads();
5261 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5262 	poll_threads();
5263 
5264 	set_thread(0);
5265 
5266 	/* It is possible that the target sends an ANA change for inactive namespaces.
5267 	 *
5268 	 * Previously, an assert() was added because this case was considered unlikely.
5269 	 * However, the assert() was hit in a real environment.
5270 	 *
5271 	 * Hence, remove the assert() and add a unit test case.
5272 	 *
5273 	 * Simulate this case by depopulating the namespaces and then parsing an ANA
5274 	 * log page created while all namespaces were active.
5275 	 * Then, check that parsing the ANA log page completes successfully.
5276 	 */
5277 	nvme_ctrlr_depopulate_namespaces(nvme_ctrlr);
5278 
5279 	rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr);
5280 	CU_ASSERT(rc == 0);
5281 
5282 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5283 	CU_ASSERT(rc == 0);
5284 
5285 	poll_threads();
5286 	spdk_delay_us(1000);
5287 	poll_threads();
5288 
5289 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5290 }
5291 
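/* Verify that an I/O failing with an ANA error is queued for retry, that the
 * namespace's ANA state is refreshed from the ANA log page, and that the retry
 * succeeds once the namespace becomes optimized again.
 */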
5292 static void
5293 test_retry_io_for_ana_error(void)
5294 {
5295 	struct nvme_path_id path = {};
5296 	struct spdk_nvme_ctrlr *ctrlr;
5297 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
5298 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5299 	struct nvme_ctrlr *nvme_ctrlr;
5300 	const int STRING_SIZE = 32;
5301 	const char *attached_names[STRING_SIZE];
5302 	struct nvme_bdev *bdev;
5303 	struct nvme_ns *nvme_ns;
5304 	struct spdk_bdev_io *bdev_io;
5305 	struct nvme_bdev_io *bio;
5306 	struct spdk_io_channel *ch;
5307 	struct nvme_bdev_channel *nbdev_ch;
5308 	struct nvme_io_path *io_path;
5309 	struct nvme_qpair *nvme_qpair;
5310 	struct ut_nvme_req *req;
5311 	uint64_t now;
5312 	int rc;
5313 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
5314 
5315 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
5316 	bdev_opts.multipath = false;
5317 
5318 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5319 	ut_init_trid(&path.trid);
5320 
5321 	g_opts.bdev_retry_count = 1;
5322 
5323 	set_thread(0);
5324 
5325 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
5326 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5327 
5328 	g_ut_attach_ctrlr_status = 0;
5329 	g_ut_attach_bdev_count = 1;
5330 
5331 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5332 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
5333 	CU_ASSERT(rc == 0);
5334 
5335 	spdk_delay_us(1000);
5336 	poll_threads();
5337 
5338 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5339 	poll_threads();
5340 
5341 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5342 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5343 
5344 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
5345 	CU_ASSERT(nvme_ctrlr != NULL);
5346 
5347 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5348 	CU_ASSERT(bdev != NULL);
5349 
5350 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5351 	CU_ASSERT(nvme_ns != NULL);
5352 
5353 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5354 	ut_bdev_io_set_buf(bdev_io);
5355 
5356 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
5357 
5358 	ch = spdk_get_io_channel(bdev);
5359 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5360 
5361 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5362 
5363 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5364 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5365 
5366 	nvme_qpair = io_path->qpair;
5367 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5368 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5369 
5370 	now = spdk_get_ticks();
5371 
5372 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5373 
5374 	/* If an I/O gets an ANA error, it should be queued, the corresponding namespace
5375 	 * should be frozen, and its ANA state should be updated.
5376 	 */
5377 	bdev_io->internal.f.in_submit_request = true;
5378 
5379 	bdev_nvme_submit_request(ch, bdev_io);
5380 
5381 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5382 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5383 
5384 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5385 	SPDK_CU_ASSERT_FATAL(req != NULL);
5386 
5387 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5388 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
5389 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
5390 
5391 	poll_thread_times(0, 1);
5392 
5393 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5394 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5395 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5396 	/* I/O should be retried immediately. */
5397 	CU_ASSERT(bio->retry_ticks == now);
5398 	CU_ASSERT(nvme_ns->ana_state_updating == true);
5399 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5400 
5401 	poll_threads();
5402 
5403 	/* The namespace is inaccessible, and hence the I/O should be queued again. */
5404 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5405 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
5406 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5407 	/* The I/O should be retried after a second if no I/O path was found but
5408 	 * one may become available.
5409 	 */
5410 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
5411 
5412 	/* The namespace should be unfrozen after its ANA state update completes. */
5413 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5414 	poll_threads();
5415 
5416 	CU_ASSERT(nvme_ns->ana_state_updating == false);
5417 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5418 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5419 
5420 	/* Retrying the queued I/O should succeed. */
5421 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
5422 	poll_threads();
5423 
5424 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5425 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
5426 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5427 
5428 	free(bdev_io);
5429 
5430 	spdk_put_io_channel(ch);
5431 
5432 	poll_threads();
5433 
5434 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5435 	CU_ASSERT(rc == 0);
5436 
5437 	poll_threads();
5438 	spdk_delay_us(1000);
5439 	poll_threads();
5440 
5441 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5442 
5443 	g_opts.bdev_retry_count = 0;
5444 }
5445 
5446 static void
5447 test_check_io_error_resiliency_params(void)
5448 {
5449 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5450 	 * 3rd parameter is fast_io_fail_timeout_sec.
5451 	 */
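	/* Judging from the cases below, the intended rules appear to be:
	 * reconnect_delay_sec must be 0 if ctrlr_loss_timeout_sec is 0 and non-zero
	 * otherwise, reconnect_delay_sec must not exceed a positive
	 * ctrlr_loss_timeout_sec, and a non-zero fast_io_fail_timeout_sec must be at
	 * least reconnect_delay_sec and at most a positive ctrlr_loss_timeout_sec.
	 */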
5452 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
5453 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
5454 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
5455 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
5456 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
5457 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
5458 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
5459 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
5460 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
5461 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
5462 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
5463 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
5464 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
5465 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
5466 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
5467 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
5468 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
5469 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5470 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
5471 }
5472 
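/* Verify that I/Os submitted while the qpair is gone and the ctrlr is resetting
 * are queued on the retry list, and that they are resubmitted in order, spaced
 * by their original submit ticks, once the reset completes.
 */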
5473 static void
5474 test_retry_io_if_ctrlr_is_resetting(void)
5475 {
5476 	struct nvme_path_id path = {};
5477 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
5478 	struct spdk_nvme_ctrlr *ctrlr;
5479 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5480 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5481 	struct nvme_ctrlr *nvme_ctrlr;
5482 	const int STRING_SIZE = 32;
5483 	const char *attached_names[STRING_SIZE];
5484 	struct nvme_bdev *bdev;
5485 	struct nvme_ns *nvme_ns;
5486 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5487 	struct spdk_io_channel *ch;
5488 	struct nvme_bdev_channel *nbdev_ch;
5489 	struct nvme_io_path *io_path;
5490 	struct nvme_qpair *nvme_qpair;
5491 	int rc;
5492 
5493 	g_opts.bdev_retry_count = 1;
5494 
5495 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5496 	ut_init_trid(&path.trid);
5497 
5498 	set_thread(0);
5499 
5500 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5501 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5502 
5503 	g_ut_attach_ctrlr_status = 0;
5504 	g_ut_attach_bdev_count = 1;
5505 
5506 	opts.ctrlr_loss_timeout_sec = -1;
5507 	opts.reconnect_delay_sec = 1;
5508 	opts.multipath = false;
5509 
5510 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5511 				   attach_ctrlr_done, NULL, &dopts, &opts);
5512 	CU_ASSERT(rc == 0);
5513 
5514 	spdk_delay_us(1000);
5515 	poll_threads();
5516 
5517 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5518 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5519 
5520 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
5521 	CU_ASSERT(nvme_ctrlr != NULL);
5522 
5523 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5524 	CU_ASSERT(bdev != NULL);
5525 
5526 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5527 	CU_ASSERT(nvme_ns != NULL);
5528 
5529 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5530 	ut_bdev_io_set_buf(bdev_io1);
5531 
5532 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5533 	ut_bdev_io_set_buf(bdev_io2);
5534 
5535 	ch = spdk_get_io_channel(bdev);
5536 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5537 
5538 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5539 
5540 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5541 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5542 
5543 	nvme_qpair = io_path->qpair;
5544 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5545 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5546 
5547 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5548 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5549 
5550 	/* If qpair is connected, I/O should succeed. */
5551 	bdev_io1->internal.f.in_submit_request = true;
5552 
5553 	bdev_nvme_submit_request(ch, bdev_io1);
5554 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5555 
5556 	poll_threads();
5557 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
5558 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5559 
5560 	/* If the qpair is disconnected, it is freed and then reconnected by resetting
5561 	 * the corresponding nvme_ctrlr. An I/O submitted while the nvme_ctrlr is
5562 	 * resetting should be queued.
5563 	 */
5564 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5565 	ctrlr->is_failed = true;
5566 
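	/* Five single-step polls should be enough for the poll group to detect the
	 * disconnected qpair, delete it, and start resetting the ctrlr (this count
	 * mirrors how many steps the current implementation happens to take).
	 */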
5567 	poll_thread_times(0, 5);
5568 
5569 	CU_ASSERT(nvme_qpair->qpair == NULL);
5570 	CU_ASSERT(nvme_ctrlr->resetting == true);
5571 	CU_ASSERT(ctrlr->is_failed == false);
5572 
5573 	bdev_io1->internal.f.in_submit_request = true;
5574 
5575 	bdev_nvme_submit_request(ch, bdev_io1);
5576 
5577 	spdk_delay_us(1);
5578 
5579 	bdev_io2->internal.f.in_submit_request = true;
5580 
5581 	bdev_nvme_submit_request(ch, bdev_io2);
5582 
5583 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5584 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5585 	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5586 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(
5587 			  TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx,
5588 				     retry_link)));
5589 
5590 	poll_threads();
5591 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5592 	poll_threads();
5593 
5594 	CU_ASSERT(nvme_qpair->qpair != NULL);
5595 	CU_ASSERT(nvme_ctrlr->resetting == false);
5596 
5597 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5598 
5599 	poll_thread_times(0, 1);
5600 
5601 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5602 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == true);
5603 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5604 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5605 
5606 	poll_threads();
5607 
5608 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5609 	CU_ASSERT(bdev_io1->internal.f.in_submit_request == false);
5610 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5611 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5612 	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
5613 
5614 	spdk_delay_us(1);
5615 
5616 	poll_thread_times(0, 1);
5617 
5618 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5619 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == true);
5620 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5621 
5622 	poll_threads();
5623 
5624 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5625 	CU_ASSERT(bdev_io2->internal.f.in_submit_request == false);
5626 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5627 
5628 	free(bdev_io1);
5629 	free(bdev_io2);
5630 
5631 	spdk_put_io_channel(ch);
5632 
5633 	poll_threads();
5634 
5635 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5636 	CU_ASSERT(rc == 0);
5637 
5638 	poll_threads();
5639 	spdk_delay_us(1000);
5640 	poll_threads();
5641 
5642 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5643 
5644 	g_opts.bdev_retry_count = 0;
5645 }
5646 
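/* Verify the delayed reconnect logic: a failed reset arms reconnect_delay_timer,
 * an explicit reset cancels the timer and reconnects at once, and repeated
 * failures eventually exceed ctrlr_loss_timeout_sec so the ctrlr is destructed.
 */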
5647 static void
5648 test_reconnect_ctrlr(void)
5649 {
5650 	struct spdk_nvme_transport_id trid = {};
5651 	struct spdk_nvme_ctrlr ctrlr = {};
5652 	struct nvme_ctrlr *nvme_ctrlr;
5653 	struct spdk_io_channel *ch1, *ch2;
5654 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5655 	int rc;
5656 
5657 	ut_init_trid(&trid);
5658 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5659 
5660 	set_thread(0);
5661 
5662 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5663 	CU_ASSERT(rc == 0);
5664 
5665 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5666 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5667 
5668 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5669 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5670 
5671 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5672 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5673 
5674 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5675 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5676 
5677 	set_thread(1);
5678 
5679 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5680 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5681 
5682 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5683 
5684 	/* Reset starts from thread 1. */
5685 	set_thread(1);
5686 
5687 	/* The reset should fail and a reconnect timer should be registered. */
5688 	ctrlr.fail_reset = true;
5689 	ctrlr.is_failed = true;
5690 
5691 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5692 	CU_ASSERT(rc == 0);
5693 	CU_ASSERT(nvme_ctrlr->resetting == true);
5694 	CU_ASSERT(ctrlr.is_failed == true);
5695 
5696 	poll_threads();
5697 
5698 	CU_ASSERT(nvme_ctrlr->resetting == false);
5699 	CU_ASSERT(ctrlr.is_failed == false);
5700 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5701 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5702 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5703 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5704 
5705 	/* A new reset starts, again from thread 1. */
5706 	set_thread(1);
5707 
5708 	/* The new reset should cancel the reconnect timer and start reconnecting at once.
5709 	 * Then, the reset should fail and a reconnect timer should be registered again.
5710 	 */
5711 	ctrlr.fail_reset = true;
5712 	ctrlr.is_failed = true;
5713 
5714 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5715 	CU_ASSERT(rc == 0);
5716 	CU_ASSERT(nvme_ctrlr->resetting == true);
5717 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5718 	CU_ASSERT(ctrlr.is_failed == true);
5719 
5720 	poll_threads();
5721 
5722 	CU_ASSERT(nvme_ctrlr->resetting == false);
5723 	CU_ASSERT(ctrlr.is_failed == false);
5724 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5725 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5726 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5727 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5728 
5729 	/* Then a reconnect retry should succeed. */
5730 	ctrlr.fail_reset = false;
5731 
5732 	spdk_delay_us(SPDK_SEC_TO_USEC);
5733 	poll_thread_times(0, 1);
5734 
5735 	CU_ASSERT(nvme_ctrlr->resetting == true);
5736 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5737 
5738 	poll_threads();
5739 
5740 	CU_ASSERT(nvme_ctrlr->resetting == false);
5741 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5742 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5743 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5744 
5745 	/* The reset should fail and a reconnect timer should be registered. */
5746 	ctrlr.fail_reset = true;
5747 	ctrlr.is_failed = true;
5748 
5749 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5750 	CU_ASSERT(rc == 0);
5751 	CU_ASSERT(nvme_ctrlr->resetting == true);
5752 	CU_ASSERT(ctrlr.is_failed == true);
5753 
5754 	poll_threads();
5755 
5756 	CU_ASSERT(nvme_ctrlr->resetting == false);
5757 	CU_ASSERT(ctrlr.is_failed == false);
5758 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5759 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5760 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5761 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5762 
5763 	/* Then a reconnect retry should still fail. */
5764 	spdk_delay_us(SPDK_SEC_TO_USEC);
5765 	poll_thread_times(0, 1);
5766 
5767 	CU_ASSERT(nvme_ctrlr->resetting == true);
5768 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5769 
5770 	poll_threads();
5771 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5772 	poll_threads();
5773 
5774 	CU_ASSERT(nvme_ctrlr->resetting == false);
5775 	CU_ASSERT(ctrlr.is_failed == false);
5776 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5777 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5778 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5779 
5780 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5781 	spdk_delay_us(SPDK_SEC_TO_USEC);
5782 	poll_threads();
5783 
5784 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5785 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5786 	CU_ASSERT(nvme_ctrlr->destruct == true);
5787 
5788 	spdk_put_io_channel(ch2);
5789 
5790 	set_thread(0);
5791 
5792 	spdk_put_io_channel(ch1);
5793 
5794 	poll_threads();
5795 	spdk_delay_us(1000);
5796 	poll_threads();
5797 
5798 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5799 }
5800 
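/* Return the nvme_path_id registered for the given trid, or NULL if none
 * matches.
 */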
5801 static struct nvme_path_id *
5802 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5803 		       const struct spdk_nvme_transport_id *trid)
5804 {
5805 	struct nvme_path_id *p;
5806 
5807 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5808 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5809 			break;
5810 		}
5811 	}
5812 
5813 	return p;
5814 }
5815 
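/* Verify failover across multiple trids: a failed reset marks last_failed_tsc
 * on every path, removing the active trid promotes the next one, and a
 * successful delayed reconnect clears last_failed_tsc on the new active path.
 */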
5816 static void
5817 test_retry_failover_ctrlr(void)
5818 {
5819 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5820 	struct spdk_nvme_ctrlr ctrlr = {};
5821 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5822 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5823 	struct spdk_io_channel *ch;
5824 	struct nvme_ctrlr_channel *ctrlr_ch;
5825 	int rc;
5826 
5827 	ut_init_trid(&trid1);
5828 	ut_init_trid2(&trid2);
5829 	ut_init_trid3(&trid3);
5830 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5831 
5832 	set_thread(0);
5833 
5834 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5835 	CU_ASSERT(rc == 0);
5836 
5837 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5838 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5839 
5840 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5841 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5842 
5843 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5844 	CU_ASSERT(rc == 0);
5845 
5846 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5847 	CU_ASSERT(rc == 0);
5848 
5849 	ch = spdk_get_io_channel(nvme_ctrlr);
5850 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5851 
5852 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5853 
5854 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5855 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5856 	CU_ASSERT(path_id1->last_failed_tsc == 0);
5857 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5858 
5859 	/* Look up path_id2 and path_id3 for later checks. The active path stays trid1 until trid1 is removed below. */
5860 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5861 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5862 
5863 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5864 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5865 
5866 	/* It is expected that connecting to all of trid1, trid2, and trid3 fails,
5867 	 * and a reconnect timer is started. */
5868 	ctrlr.fail_reset = true;
5869 	ctrlr.is_failed = true;
5870 
5871 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5872 	CU_ASSERT(rc == 0);
5873 
5874 	poll_threads();
5875 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5876 	poll_threads();
5877 
5878 	CU_ASSERT(nvme_ctrlr->resetting == false);
5879 	CU_ASSERT(ctrlr.is_failed == false);
5880 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5881 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5882 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5883 	CU_ASSERT(path_id1->last_failed_tsc != 0);
5884 
5885 	CU_ASSERT(path_id2->last_failed_tsc != 0);
5886 	CU_ASSERT(path_id3->last_failed_tsc != 0);
5887 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5888 
5889 	/* If we remove trid1 while a reconnect is scheduled, trid1 is removed and the
5890 	 * active path_id is switched to trid2, but a reset is not started.
5891 	 */
5892 	rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true);
5893 	CU_ASSERT(rc == -EALREADY);
5894 
5895 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL);
5896 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5897 
5898 	CU_ASSERT(nvme_ctrlr->resetting == false);
5899 
5900 	/* If reconnect succeeds, trid2 should be the active path_id */
5901 	ctrlr.fail_reset = false;
5902 
5903 	spdk_delay_us(SPDK_SEC_TO_USEC);
5904 	poll_thread_times(0, 1);
5905 
5906 	CU_ASSERT(nvme_ctrlr->resetting == true);
5907 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5908 
5909 	poll_threads();
5910 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5911 	poll_threads();
5912 
5913 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL);
5914 	CU_ASSERT(path_id2->last_failed_tsc == 0);
5915 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5916 	CU_ASSERT(nvme_ctrlr->resetting == false);
5917 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5918 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5919 
5920 	spdk_put_io_channel(ch);
5921 
5922 	poll_threads();
5923 
5924 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5925 	CU_ASSERT(rc == 0);
5926 
5927 	poll_threads();
5928 	spdk_delay_us(1000);
5929 	poll_threads();
5930 
5931 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5932 }
5933 
5934 static void
5935 test_fail_path(void)
5936 {
5937 	struct nvme_path_id path = {};
5938 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
5939 	struct spdk_nvme_ctrlr *ctrlr;
5940 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
5941 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5942 	struct nvme_ctrlr *nvme_ctrlr;
5943 	const int STRING_SIZE = 32;
5944 	const char *attached_names[STRING_SIZE];
5945 	struct nvme_bdev *bdev;
5946 	struct nvme_ns *nvme_ns;
5947 	struct spdk_bdev_io *bdev_io;
5948 	struct spdk_io_channel *ch;
5949 	struct nvme_bdev_channel *nbdev_ch;
5950 	struct nvme_io_path *io_path;
5951 	struct nvme_ctrlr_channel *ctrlr_ch;
5952 	int rc;
5953 
5954 	/* The test scenario is the following.
5955 	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5956 	 * - Resetting a ctrlr fails, and reconnecting the ctrlr is repeated.
5957 	 * - While the ctrlr is reconnecting, an I/O is submitted and queued.
5958 	 * - The I/O waits until the ctrlr is recovered, but fast_io_fail_timeout_sec
5959 	 *   expires first. The queued I/O is failed.
5960 	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
5961 	 * - Then ctrlr_loss_timeout_sec expires and the ctrlr is deleted.
5962 	 */
5963 
5964 	g_opts.bdev_retry_count = 1;
5965 
5966 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5967 	ut_init_trid(&path.trid);
5968 
5969 	set_thread(0);
5970 
5971 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5972 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5973 
5974 	g_ut_attach_ctrlr_status = 0;
5975 	g_ut_attach_bdev_count = 1;
5976 
5977 	opts.ctrlr_loss_timeout_sec = 4;
5978 	opts.reconnect_delay_sec = 1;
5979 	opts.fast_io_fail_timeout_sec = 2;
5980 	opts.multipath = false;
5981 
5982 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5983 				   attach_ctrlr_done, NULL, &dopts, &opts);
5984 	CU_ASSERT(rc == 0);
5985 
5986 	spdk_delay_us(1000);
5987 	poll_threads();
5988 
5989 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5990 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5991 
5992 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
5993 	CU_ASSERT(nvme_ctrlr != NULL);
5994 
5995 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5996 	CU_ASSERT(bdev != NULL);
5997 
5998 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5999 	CU_ASSERT(nvme_ns != NULL);
6000 
6001 	ch = spdk_get_io_channel(bdev);
6002 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6003 
6004 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6005 
6006 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
6007 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6008 
6009 	ctrlr_ch = io_path->qpair->ctrlr_ch;
6010 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
6011 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
6012 
6013 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6014 	ut_bdev_io_set_buf(bdev_io);
6015 
6017 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
6018 	ctrlr->fail_reset = true;
6019 	ctrlr->is_failed = true;
6020 
6021 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
6022 	CU_ASSERT(rc == 0);
6023 	CU_ASSERT(nvme_ctrlr->resetting == true);
6024 	CU_ASSERT(ctrlr->is_failed == true);
6025 
6026 	poll_threads();
6027 
6028 	CU_ASSERT(nvme_ctrlr->resetting == false);
6029 	CU_ASSERT(ctrlr->is_failed == false);
6030 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6031 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6032 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
6033 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
6034 
6035 	/* I/O should be queued. */
6036 	bdev_io->internal.f.in_submit_request = true;
6037 
6038 	bdev_nvme_submit_request(ch, bdev_io);
6039 
6040 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6041 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6042 
6043 	/* After a second, the I/O should still be queued and the ctrlr should
6044 	 * still be recovering.
6045 	 */
6046 	spdk_delay_us(SPDK_SEC_TO_USEC);
6047 	poll_threads();
6048 
6049 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6050 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6051 
6052 	CU_ASSERT(nvme_ctrlr->resetting == false);
6053 	CU_ASSERT(ctrlr->is_failed == false);
6054 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6055 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6056 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
6057 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
6058 
6059 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6060 
6061 	/* After two seconds, fast_io_fail_timeout_sec should expire. */
6062 	spdk_delay_us(SPDK_SEC_TO_USEC);
6063 	poll_threads();
6064 
6065 	CU_ASSERT(nvme_ctrlr->resetting == false);
6066 	CU_ASSERT(ctrlr->is_failed == false);
6067 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
6068 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
6069 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
6070 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
6071 
6072 	/* Then, within a second, the pending I/O should be failed. */
6073 	spdk_delay_us(SPDK_SEC_TO_USEC);
6074 	poll_threads();
6075 
6076 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6077 	poll_threads();
6078 
6079 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6080 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
6081 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
6082 
6083 	/* Another I/O submission should be failed immediately. */
6084 	bdev_io->internal.f.in_submit_request = true;
6085 
6086 	bdev_nvme_submit_request(ch, bdev_io);
6087 
6088 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6089 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
6090 
6091 	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr
6092 	 * should be deleted.
6093 	 */
6094 	spdk_delay_us(SPDK_SEC_TO_USEC);
6095 	poll_threads();
6096 
6097 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6098 	poll_threads();
6099 
6100 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
6101 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
6102 	CU_ASSERT(nvme_ctrlr->destruct == true);
6103 
6104 	spdk_put_io_channel(ch);
6105 
6106 	poll_threads();
6107 	spdk_delay_us(1000);
6108 	poll_threads();
6109 
6110 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6111 
6112 	free(bdev_io);
6113 
6114 	g_opts.bdev_retry_count = 0;
6115 }
6116 
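/* Verify that nvme_ns_cmp() orders namespaces by ID, including at the extremes
 * of the 32-bit ID space.
 */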
6117 static void
6118 test_nvme_ns_cmp(void)
6119 {
6120 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
6121 
6122 	nvme_ns1.id = 0;
6123 	nvme_ns2.id = UINT32_MAX;
6124 
6125 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
6126 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
6127 }
6128 
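/* Verify the ANATT timer life cycle: entering the ANA change state arms the
 * timer, leaving the state cancels it, and letting cdata.anatt seconds elapse
 * sets ana_transition_timedout.
 */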
6129 static void
6130 test_ana_transition(void)
6131 {
6132 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
6133 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
6134 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
6135 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
6136 
6137 	/* case 1: the ana_transition_timedout flag is cleared. */
6138 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6139 	nvme_ns.ana_transition_timedout = true;
6140 
6141 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6142 
6143 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6144 
6145 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
6146 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6147 
6148 	/* case 2: ANATT timer is kept. */
6149 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6150 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
6151 			      &nvme_ns,
6152 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6153 
6154 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6155 
6156 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6157 
6158 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6159 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
6160 
6161 	/* case 3: ANATT timer is stopped. */
6162 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6163 
6164 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6165 
6166 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6167 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
6168 
6169 	/* ANATT timer is started. */
6170 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
6171 
6172 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
6173 
6174 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
6175 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
6176 
6177 	/* The ANATT timer expires. */
6178 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
6179 
6180 	poll_threads();
6181 
6182 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
6183 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
6184 }
6185 
6186 static void
6187 _set_preferred_path_cb(void *cb_arg, int rc)
6188 {
6189 	bool *done = cb_arg;
6190 
6191 	*done = true;
6192 }
6193 
6194 static void
6195 test_set_preferred_path(void)
6196 {
6197 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
6198 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
6199 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6200 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6201 	const int STRING_SIZE = 32;
6202 	const char *attached_names[STRING_SIZE];
6203 	struct nvme_bdev *bdev;
6204 	struct spdk_io_channel *ch;
6205 	struct nvme_bdev_channel *nbdev_ch;
6206 	struct nvme_io_path *io_path;
6207 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6208 	const struct spdk_nvme_ctrlr_data *cdata;
6209 	bool done;
6210 	int rc;
6211 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
6212 
6213 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
6214 	bdev_opts.multipath = true;
6215 
6216 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6217 	ut_init_trid(&path1.trid);
6218 	ut_init_trid2(&path2.trid);
6219 	ut_init_trid3(&path3.trid);
6220 	g_ut_attach_ctrlr_status = 0;
6221 	g_ut_attach_bdev_count = 1;
6222 
6223 	set_thread(0);
6224 
6225 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6226 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6227 
6228 	ctrlr1->ns[0].uuid = &uuid1;
6229 
6230 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6231 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6232 	CU_ASSERT(rc == 0);
6233 
6234 	spdk_delay_us(1000);
6235 	poll_threads();
6236 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6237 	poll_threads();
6238 
6239 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6240 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6241 
6242 	ctrlr2->ns[0].uuid = &uuid1;
6243 
6244 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6245 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6246 	CU_ASSERT(rc == 0);
6247 
6248 	spdk_delay_us(1000);
6249 	poll_threads();
6250 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6251 	poll_threads();
6252 
6253 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
6254 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
6255 
6256 	ctrlr3->ns[0].uuid = &uuid1;
6257 
6258 	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
6259 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6260 	CU_ASSERT(rc == 0);
6261 
6262 	spdk_delay_us(1000);
6263 	poll_threads();
6264 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6265 	poll_threads();
6266 
6267 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6268 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6269 
6270 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6271 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6272 
6273 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6274 
6275 	ch = spdk_get_io_channel(bdev);
6276 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6277 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6278 
6279 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6280 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6281 
6282 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6283 
6284 	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
6285 	 * should return io_path to ctrlr2.
6286 	 */
6287 
6288 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
6289 	done = false;
6290 
6291 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6292 
6293 	poll_threads();
6294 	CU_ASSERT(done == true);
6295 
6296 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6297 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6298 
6299 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6300 
6301 	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
6302 	 * acquired, find_io_path() should return io_path to ctrlr3.
6303 	 */
6304 
6305 	spdk_put_io_channel(ch);
6306 
6307 	poll_threads();
6308 
6309 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
6310 	done = false;
6311 
6312 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6313 
6314 	poll_threads();
6315 	CU_ASSERT(done == true);
6316 
6317 	ch = spdk_get_io_channel(bdev);
6318 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6319 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6320 
6321 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6322 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6323 
6324 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
6325 
6326 	spdk_put_io_channel(ch);
6327 
6328 	poll_threads();
6329 
6330 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6331 	CU_ASSERT(rc == 0);
6332 
6333 	poll_threads();
6334 	spdk_delay_us(1000);
6335 	poll_threads();
6336 
6337 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6338 }
6339 
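/* Verify round-robin path selection when current_io_path is already set:
 * selection resumes after the current path, skips inaccessible paths, prefers
 * optimized over non-optimized paths, and rotates every rr_min_io I/Os.
 */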
6340 static void
6341 test_find_next_io_path(void)
6342 {
6343 	struct nvme_bdev_channel nbdev_ch = {
6344 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6345 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6346 		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
6347 	};
6348 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6349 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6350 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6351 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6352 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6353 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6354 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6355 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6356 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6357 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6358 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6359 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6360 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6361 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6362 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6363 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6364 
6365 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6366 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6367 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6368 
6369 	/* Test the case where nbdev_ch->current_io_path is set. The case of current_io_path == NULL
6370 	 * is covered in test_find_io_path.
6371 	 */
6372 
6373 	nbdev_ch.current_io_path = &io_path2;
6374 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6375 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6376 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6377 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6378 
6379 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6380 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6381 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6382 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6383 
6384 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6385 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6386 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6387 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6388 
6389 	nbdev_ch.current_io_path = &io_path3;
6390 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6391 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6392 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6393 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6394 
6395 	/* Test if next io_path is selected according to rr_min_io */
6396 
6397 	nbdev_ch.current_io_path = NULL;
6398 	nbdev_ch.rr_min_io = 2;
6399 	nbdev_ch.rr_counter = 0;
6400 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6401 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6402 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6403 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6404 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6405 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6406 
6407 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6408 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6409 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6410 }
6411 
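/* Verify the least queue depth selector: among the ANA optimized paths the one
 * with the fewest outstanding requests wins, and non-optimized paths are used
 * only when no optimized path is available.
 */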
6412 static void
6413 test_find_io_path_min_qd(void)
6414 {
6415 	struct nvme_bdev_channel nbdev_ch = {
6416 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6417 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6418 		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
6419 	};
6420 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6421 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6422 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
6423 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6424 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6425 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6426 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6427 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6428 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6429 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6430 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6431 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6432 	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
6433 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6434 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6435 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6436 
6437 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6438 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6439 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6440 
6441 	/* Test that the minimum num_outstanding_reqs and the ANA optimized state
6442 	 * are prioritized when using the least queue depth selector.
6443 	 */
6444 	qpair1.num_outstanding_reqs = 2;
6445 	qpair2.num_outstanding_reqs = 1;
6446 	qpair3.num_outstanding_reqs = 0;
6447 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6448 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6449 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6450 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6451 
6452 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6453 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6454 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6455 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6456 
6457 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6458 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6459 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6460 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6461 
6462 	qpair2.num_outstanding_reqs = 4;
6463 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6464 }
6465 
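/* Verify that with disable_auto_failback set, I/O stays on the failover path
 * even after the original path recovers, until bdev_nvme_set_preferred_path()
 * moves it back explicitly.
 */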
6466 static void
6467 test_disable_auto_failback(void)
6468 {
6469 	struct nvme_path_id path1 = {}, path2 = {};
6470 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
6471 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6472 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6473 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6474 	struct nvme_ctrlr *nvme_ctrlr1;
6475 	const int STRING_SIZE = 32;
6476 	const char *attached_names[STRING_SIZE];
6477 	struct nvme_bdev *bdev;
6478 	struct spdk_io_channel *ch;
6479 	struct nvme_bdev_channel *nbdev_ch;
6480 	struct nvme_io_path *io_path;
6481 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6482 	const struct spdk_nvme_ctrlr_data *cdata;
6483 	bool done;
6484 	int rc;
6485 
6486 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6487 	ut_init_trid(&path1.trid);
6488 	ut_init_trid2(&path2.trid);
6489 	g_ut_attach_ctrlr_status = 0;
6490 	g_ut_attach_bdev_count = 1;
6491 
6492 	g_opts.disable_auto_failback = true;
6493 
6494 	opts.ctrlr_loss_timeout_sec = -1;
6495 	opts.reconnect_delay_sec = 1;
6496 	opts.multipath = true;
6497 
6498 	set_thread(0);
6499 
6500 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6501 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6502 
6503 	ctrlr1->ns[0].uuid = &uuid1;
6504 
6505 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6506 				   attach_ctrlr_done, NULL, &dopts, &opts);
6507 	CU_ASSERT(rc == 0);
6508 
6509 	spdk_delay_us(1000);
6510 	poll_threads();
6511 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6512 	poll_threads();
6513 
6514 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6515 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6516 
6517 	ctrlr2->ns[0].uuid = &uuid1;
6518 
6519 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6520 				   attach_ctrlr_done, NULL, &dopts, &opts);
6521 	CU_ASSERT(rc == 0);
6522 
6523 	spdk_delay_us(1000);
6524 	poll_threads();
6525 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6526 	poll_threads();
6527 
6528 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6529 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6530 
6531 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6532 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6533 
6534 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn);
6535 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6536 
6537 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6538 
6539 	ch = spdk_get_io_channel(bdev);
6540 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6541 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6542 
6543 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6544 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6545 
6546 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6547 
6548 	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
6549 	ctrlr1->fail_reset = true;
6550 	ctrlr1->is_failed = true;
6551 
6552 	bdev_nvme_reset_ctrlr(nvme_ctrlr1);
6553 
6554 	poll_threads();
6555 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6556 	poll_threads();
6557 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6558 	poll_threads();
6559 
6560 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6561 
6562 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6563 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6564 
6565 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6566 
6567 	/* After a second, ctrlr1 has recovered. However, automatic failback is disabled.
6568 	 * Hence, the io_path to ctrlr2 should still be used.
6569 	 */
6570 	ctrlr1->fail_reset = false;
6571 
6572 	spdk_delay_us(SPDK_SEC_TO_USEC);
6573 	poll_threads();
6574 
6575 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6576 
6577 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6578 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6579 
6580 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6581 
6582 	/* Explicitly set the io_path to ctrlr1 as preferred. Then the io_path to
6583 	 * ctrlr1 should be used again.
6584 	 */
6585 
6586 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6587 	done = false;
6588 
6589 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6590 
6591 	poll_threads();
6592 	CU_ASSERT(done == true);
6593 
6594 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6595 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6596 
6597 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6598 
6599 	spdk_put_io_channel(ch);
6600 
6601 	poll_threads();
6602 
6603 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6604 	CU_ASSERT(rc == 0);
6605 
6606 	poll_threads();
6607 	spdk_delay_us(1000);
6608 	poll_threads();
6609 
6610 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6611 
6612 	g_opts.disable_auto_failback = false;
6613 }
6614 
6615 static void
6616 ut_set_multipath_policy_done(void *cb_arg, int rc)
6617 {
6618 	int *done = cb_arg;
6619 
6620 	SPDK_CU_ASSERT_FATAL(done != NULL);
6621 	*done = rc;
6622 }
6623 
6624 static void
6625 test_set_multipath_policy(void)
6626 {
6627 	struct nvme_path_id path1 = {}, path2 = {};
6628 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
6629 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6630 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
6631 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6632 	const int STRING_SIZE = 32;
6633 	const char *attached_names[STRING_SIZE];
6634 	struct nvme_bdev *bdev;
6635 	struct spdk_io_channel *ch;
6636 	struct nvme_bdev_channel *nbdev_ch;
6637 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6638 	int done;
6639 	int rc;
6640 
6641 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6642 	ut_init_trid(&path1.trid);
6643 	ut_init_trid2(&path2.trid);
6644 	g_ut_attach_ctrlr_status = 0;
6645 	g_ut_attach_bdev_count = 1;
6646 
6647 	g_opts.disable_auto_failback = true;
6648 
6649 	opts.ctrlr_loss_timeout_sec = -1;
6650 	opts.reconnect_delay_sec = 1;
6651 	opts.multipath = true;
6652 
6653 	set_thread(0);
6654 
6655 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6656 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6657 
6658 	ctrlr1->ns[0].uuid = &uuid1;
6659 
6660 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6661 				   attach_ctrlr_done, NULL, &dopts, &opts);
6662 	CU_ASSERT(rc == 0);
6663 
6664 	spdk_delay_us(1000);
6665 	poll_threads();
6666 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6667 	poll_threads();
6668 
6669 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6670 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6671 
6672 	ctrlr2->ns[0].uuid = &uuid1;
6673 
6674 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6675 				   attach_ctrlr_done, NULL, &dopts, &opts);
6676 	CU_ASSERT(rc == 0);
6677 
6678 	spdk_delay_us(1000);
6679 	poll_threads();
6680 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6681 	poll_threads();
6682 
6683 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6684 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6685 
6686 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6687 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6688 
	/* If the multipath policy is updated before any I/O channel is created,
	 * a newly created I/O channel should have the update.
	 */
6692 	done = -1;
6693 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6694 					    BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX,
6695 					    ut_set_multipath_policy_done, &done);
6696 	poll_threads();
6697 	CU_ASSERT(done == 0);
6698 
6699 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6700 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6701 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
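
	/* The same knobs are settable at runtime over JSON-RPC. A hedged sketch
	 * of the equivalent invocation (exact option spellings should be checked
	 * against scripts/rpc.py bdev_nvme_set_multipath_policy --help):
	 *
	 *   scripts/rpc.py bdev_nvme_set_multipath_policy -b <bdev_name> \
	 *           -p active_active -s queue_depth
	 */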
6702 
6703 	ch = spdk_get_io_channel(bdev);
6704 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6705 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6706 
6707 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6708 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6709 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6710 
	/* If the multipath policy is updated while an I/O channel is active,
	 * the update should be applied to the I/O channel immediately.
	 */
6714 	done = -1;
6715 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6716 					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX,
6717 					    ut_set_multipath_policy_done, &done);
6718 	poll_threads();
6719 	CU_ASSERT(done == 0);
6720 
6721 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6722 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6723 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6724 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6725 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6726 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6727 
6728 	spdk_put_io_channel(ch);
6729 
6730 	poll_threads();
6731 
6732 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6733 	CU_ASSERT(rc == 0);
6734 
6735 	poll_threads();
6736 	spdk_delay_us(1000);
6737 	poll_threads();
6738 
6739 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6740 }
6741 
6742 static void
6743 test_uuid_generation(void)
6744 {
6745 	uint32_t nsid1 = 1, nsid2 = 2;
6746 	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
6747 	char sn3[21] = "                    ";
6748 	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
6749 	struct spdk_uuid uuid1, uuid2;
6750 	int rc;
6751 
	/* Test case 1:
	 * Serial numbers are the same, nsids are different.
	 * Compare the two generated UUIDs - they should be different. */
6755 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6756 	CU_ASSERT(rc == 0);
6757 	rc = nvme_generate_uuid(sn1, nsid2, &uuid2);
6758 	CU_ASSERT(rc == 0);
6759 
6760 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
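
	/* The helper is deterministic by design (its purpose is to give a
	 * namespace a stable UUID across restarts), so a sketch like the
	 * following, not part of this test, would be expected to hold:
	 *
	 *   nvme_generate_uuid(sn1, nsid1, &uuid1);
	 *   nvme_generate_uuid(sn1, nsid1, &uuid2);
	 *   CU_ASSERT(spdk_uuid_compare(&uuid1, &uuid2) == 0);
	 */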
6761 
	/* Test case 2:
	 * Serial numbers differ only by one character, nsids are the same.
	 * Compare the two generated UUIDs - they should be different. */
6765 	rc = nvme_generate_uuid(sn1, nsid1, &uuid1);
6766 	CU_ASSERT(rc == 0);
6767 	rc = nvme_generate_uuid(sn2, nsid1, &uuid2);
6768 	CU_ASSERT(rc == 0);
6769 
6770 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6771 
	/* Test case 3:
	 * The serial number consists only of space characters.
	 * Validate the generated UUID. */
6775 	rc = nvme_generate_uuid(sn3, nsid1, &uuid1);
6776 	CU_ASSERT(rc == 0);
	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
}
6780 
6781 static void
6782 test_retry_io_to_same_path(void)
6783 {
6784 	struct nvme_path_id path1 = {}, path2 = {};
6785 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6786 	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
6787 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6788 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
6789 	const int STRING_SIZE = 32;
6790 	const char *attached_names[STRING_SIZE];
6791 	struct nvme_bdev *bdev;
6792 	struct spdk_bdev_io *bdev_io;
6793 	struct nvme_bdev_io *bio;
6794 	struct spdk_io_channel *ch;
6795 	struct nvme_bdev_channel *nbdev_ch;
6796 	struct nvme_io_path *io_path1, *io_path2;
6797 	struct ut_nvme_req *req;
6798 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6799 	int done;
6800 	int rc;
6801 	struct spdk_bdev_nvme_ctrlr_opts bdev_opts = {0};
6802 
6803 	spdk_bdev_nvme_get_default_ctrlr_opts(&bdev_opts);
6804 	bdev_opts.multipath = true;
6805 
6806 	g_opts.nvme_ioq_poll_period_us = 1;
6807 
6808 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6809 	ut_init_trid(&path1.trid);
6810 	ut_init_trid2(&path2.trid);
6811 	g_ut_attach_ctrlr_status = 0;
6812 	g_ut_attach_bdev_count = 1;
6813 
6814 	set_thread(0);
6815 
6816 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6817 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6818 
6819 	ctrlr1->ns[0].uuid = &uuid1;
6820 
6821 	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6822 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6823 	CU_ASSERT(rc == 0);
6824 
6825 	spdk_delay_us(1000);
6826 	poll_threads();
6827 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6828 	poll_threads();
6829 
6830 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6831 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6832 
6833 	ctrlr2->ns[0].uuid = &uuid1;
6834 
6835 	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6836 				   attach_ctrlr_done, NULL, &opts, &bdev_opts);
6837 	CU_ASSERT(rc == 0);
6838 
6839 	spdk_delay_us(1000);
6840 	poll_threads();
6841 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6842 	poll_threads();
6843 
6844 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6845 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6846 
6847 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
6848 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6849 
6850 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
6851 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6852 
6853 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6854 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6855 
6856 	done = -1;
6857 	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6858 					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
6859 	poll_threads();
6860 	CU_ASSERT(done == 0);
6861 
6862 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6863 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6864 	CU_ASSERT(bdev->rr_min_io == 1);
6865 
6866 	ch = spdk_get_io_channel(bdev);
6867 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6868 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6869 
6870 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6871 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6872 	CU_ASSERT(nbdev_ch->rr_min_io == 1);
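
	/* With the round-robin selector, rr_min_io is the number of I/Os issued
	 * to one path before the selector advances to the next one. rr_min_io == 1
	 * alternates paths on every I/O, which the submissions below depend on.
	 */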
6873 
6874 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6875 	ut_bdev_io_set_buf(bdev_io);
6876 
6877 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
6878 
6879 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
6880 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
6881 
6882 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
6883 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
6884 
6885 	/* The 1st I/O should be submitted to io_path1. */
6886 	bdev_io->internal.f.in_submit_request = true;
6887 
6888 	bdev_nvme_submit_request(ch, bdev_io);
6889 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6890 	CU_ASSERT(bio->io_path == io_path1);
6891 	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);
6892 
6893 	spdk_delay_us(1);
6894 
6895 	poll_threads();
6896 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6897 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6898 
6899 	/* The 2nd I/O should be submitted to io_path2 because the path selection
6900 	 * policy is round-robin.
6901 	 */
6902 	bdev_io->internal.f.in_submit_request = true;
6903 
6904 	bdev_nvme_submit_request(ch, bdev_io);
6905 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6906 	CU_ASSERT(bio->io_path == io_path2);
6907 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6908 
6909 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6910 	SPDK_CU_ASSERT_FATAL(req != NULL);
6911 
6912 	/* Set retry count to non-zero. */
6913 	g_opts.bdev_retry_count = 2;
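
	/* bdev_retry_count bounds how many times the bdev layer retries a failed
	 * I/O (a negative value means unlimited retries). A budget of 2 is enough
	 * for the errors injected below.
	 */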
6914 
6915 	/* Inject an I/O error. */
6916 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6917 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6918 
6919 	/* The 2nd I/O should be queued to nbdev_ch. */
6920 	spdk_delay_us(1);
6921 	poll_thread_times(0, 1);
6922 
6923 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6924 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6925 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6926 
	/* The 2nd I/O should keep io_path2 cached. */
6928 	CU_ASSERT(bio->io_path == io_path2);
6929 
6930 	/* The 2nd I/O should be submitted to io_path2 again. */
6931 	poll_thread_times(0, 1);
6932 
6933 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6934 	CU_ASSERT(bio->io_path == io_path2);
6935 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6936 
6937 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6938 	SPDK_CU_ASSERT_FATAL(req != NULL);
6939 
6940 	/* Inject an I/O error again. */
6941 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6942 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6943 	req->cpl.status.crd = 1;
6944 
6945 	ctrlr2->cdata.crdt[1] = 1;
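
	/* crd == 1 selects Command Retry Delay Time 1 (cdata.crdt[1]), which the
	 * NVMe spec expresses in units of 100 milliseconds. crdt[1] == 1 asks the
	 * host to wait 100 ms, which is why the retry below is released only
	 * after a 100000 us delay.
	 */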
6946 
6947 	/* The 2nd I/O should be queued to nbdev_ch. */
6948 	spdk_delay_us(1);
6949 	poll_thread_times(0, 1);
6950 
6951 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6952 	CU_ASSERT(bdev_io->internal.f.in_submit_request == true);
6953 	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
6954 
	/* The 2nd I/O should keep io_path2 cached. */
6956 	CU_ASSERT(bio->io_path == io_path2);
6957 
6958 	/* Detach ctrlr2 dynamically. */
6959 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
6960 	CU_ASSERT(rc == 0);
6961 
6962 	spdk_delay_us(1000);
6963 	poll_threads();
6964 	spdk_delay_us(1000);
6965 	poll_threads();
6966 	spdk_delay_us(1000);
6967 	poll_threads();
6968 	spdk_delay_us(1000);
6969 	poll_threads();
6970 
6971 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);
6972 
6973 	poll_threads();
6974 	spdk_delay_us(100000);
6975 	poll_threads();
6976 	spdk_delay_us(1);
6977 	poll_threads();
6978 
	/* The 2nd I/O should succeed via io_path1. */
6980 	CU_ASSERT(bdev_io->internal.f.in_submit_request == false);
6981 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6982 	CU_ASSERT(bio->io_path == io_path1);
6983 
6984 	free(bdev_io);
6985 
6986 	spdk_put_io_channel(ch);
6987 
6988 	poll_threads();
6989 	spdk_delay_us(1);
6990 	poll_threads();
6991 
6992 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6993 	CU_ASSERT(rc == 0);
6994 
6995 	poll_threads();
6996 	spdk_delay_us(1000);
6997 	poll_threads();
6998 
6999 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7000 
7001 	g_opts.nvme_ioq_poll_period_us = 0;
7002 	g_opts.bdev_retry_count = 0;
7003 }
7004 
/* This case verifies the fix for a complex race condition in which a failover
 * is lost if the fabric connect command times out while the controller is
 * being reset.
 */
7009 static void
7010 test_race_between_reset_and_disconnected(void)
7011 {
7012 	struct spdk_nvme_transport_id trid = {};
7013 	struct spdk_nvme_ctrlr ctrlr = {};
7014 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7015 	struct nvme_path_id *curr_trid;
7016 	struct spdk_io_channel *ch1, *ch2;
7017 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7018 	int rc;
7019 
7020 	ut_init_trid(&trid);
7021 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7022 
7023 	set_thread(0);
7024 
7025 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7026 	CU_ASSERT(rc == 0);
7027 
7028 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7029 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7030 
7031 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7032 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7033 
7034 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7035 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7036 
7037 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7038 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7039 
7040 	set_thread(1);
7041 
7042 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7043 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7044 
7045 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7046 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7047 
7048 	/* Reset starts from thread 1. */
7049 	set_thread(1);
7050 
7051 	nvme_ctrlr->resetting = false;
7052 	curr_trid->last_failed_tsc = spdk_get_ticks();
7053 	ctrlr.is_failed = true;
7054 
7055 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7056 	CU_ASSERT(rc == 0);
7057 	CU_ASSERT(nvme_ctrlr->resetting == true);
7058 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7059 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7060 
7061 	poll_thread_times(0, 3);
7062 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7063 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7064 
7065 	poll_thread_times(0, 1);
7066 	poll_thread_times(1, 1);
7067 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7068 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7069 	CU_ASSERT(ctrlr.is_failed == true);
7070 
7071 	poll_thread_times(1, 1);
7072 	poll_thread_times(0, 1);
7073 	CU_ASSERT(ctrlr.is_failed == false);
7074 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7075 
7076 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7077 	poll_thread_times(0, 2);
7078 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7079 
7080 	poll_thread_times(0, 1);
7081 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7082 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7083 
7084 	poll_thread_times(1, 1);
7085 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7086 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7087 	CU_ASSERT(nvme_ctrlr->resetting == true);
7088 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
7089 
7090 	poll_thread_times(0, 2);
7091 	CU_ASSERT(nvme_ctrlr->resetting == true);
7092 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7093 	poll_thread_times(1, 1);
7094 	CU_ASSERT(nvme_ctrlr->resetting == true);
7095 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7096 
	/* At this point there is just one poll left before _bdev_nvme_reset_complete()
	 * is executed.
	 *
	 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric
	 * connect command is executed. If the fabric connect command times out,
	 * bdev_nvme_failover_ctrlr() is executed. This failover should be deferred
	 * until _bdev_nvme_reset_complete() sets ctrlr->resetting to false.
	 *
	 * Simulate a fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
	 */
7106 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
7107 	CU_ASSERT(rc == -EINPROGRESS);
7108 	CU_ASSERT(nvme_ctrlr->resetting == true);
7109 	CU_ASSERT(nvme_ctrlr->pending_failover == true);
7110 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7111 
7112 	poll_thread_times(0, 1);
7113 
7114 	CU_ASSERT(nvme_ctrlr->resetting == true);
7115 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7116 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
7117 
7118 	poll_threads();
7119 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7120 	poll_threads();
7121 
7122 	CU_ASSERT(nvme_ctrlr->resetting == false);
7123 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
7124 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7125 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7126 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7127 
7128 	spdk_put_io_channel(ch2);
7129 
7130 	set_thread(0);
7131 
7132 	spdk_put_io_channel(ch1);
7133 
7134 	poll_threads();
7135 
7136 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7137 	CU_ASSERT(rc == 0);
7138 
7139 	poll_threads();
7140 	spdk_delay_us(1000);
7141 	poll_threads();
7142 
7143 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7144 }
7145 static void
7146 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
7147 {
7148 	int *_rc = (int *)cb_arg;
7149 
7150 	SPDK_CU_ASSERT_FATAL(_rc != NULL);
7151 	*_rc = rc;
7152 }
7153 
7154 static void
7155 test_ctrlr_op_rpc(void)
7156 {
7157 	struct spdk_nvme_transport_id trid = {};
7158 	struct spdk_nvme_ctrlr ctrlr = {};
7159 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7160 	struct nvme_path_id *curr_trid;
7161 	struct spdk_io_channel *ch1, *ch2;
7162 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7163 	int ctrlr_op_rc;
7164 	int rc;
7165 
7166 	ut_init_trid(&trid);
7167 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7168 
7169 	set_thread(0);
7170 
7171 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7172 	CU_ASSERT(rc == 0);
7173 
7174 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7175 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7176 
7177 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7178 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7179 
7180 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7181 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7182 
7183 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7184 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7185 
7186 	set_thread(1);
7187 
7188 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7189 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7190 
7191 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7192 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7193 
7194 	/* Reset starts from thread 1. */
7195 	set_thread(1);
7196 
7197 	/* Case 1: ctrlr is already being destructed. */
7198 	nvme_ctrlr->destruct = true;
7199 	ctrlr_op_rc = 0;
7200 
7201 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7202 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7203 
7204 	poll_threads();
7205 
7206 	CU_ASSERT(ctrlr_op_rc == -ENXIO);
7207 
7208 	/* Case 2: reset is in progress. */
7209 	nvme_ctrlr->destruct = false;
7210 	nvme_ctrlr->resetting = true;
7211 	ctrlr_op_rc = 0;
7212 
7213 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7214 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7215 
7216 	poll_threads();
7217 
7218 	CU_ASSERT(ctrlr_op_rc == -EBUSY);
7219 
7220 	/* Case 3: reset completes successfully. */
7221 	nvme_ctrlr->resetting = false;
7222 	curr_trid->last_failed_tsc = spdk_get_ticks();
7223 	ctrlr.is_failed = true;
7224 	ctrlr_op_rc = -1;
7225 
7226 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
7227 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7228 
7229 	CU_ASSERT(nvme_ctrlr->resetting == true);
7230 	CU_ASSERT(ctrlr_op_rc == -1);
7231 
7232 	poll_threads();
7233 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7234 	poll_threads();
7235 
7236 	CU_ASSERT(nvme_ctrlr->resetting == false);
7237 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
7238 	CU_ASSERT(ctrlr.is_failed == false);
7239 	CU_ASSERT(ctrlr_op_rc == 0);
7240 
7241 	/* Case 4: invalid operation. */
7242 	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
7243 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7244 
7245 	poll_threads();
7246 
7247 	CU_ASSERT(ctrlr_op_rc == -EINVAL);
7248 
7249 	spdk_put_io_channel(ch2);
7250 
7251 	set_thread(0);
7252 
7253 	spdk_put_io_channel(ch1);
7254 
7255 	poll_threads();
7256 
7257 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7258 	CU_ASSERT(rc == 0);
7259 
7260 	poll_threads();
7261 	spdk_delay_us(1000);
7262 	poll_threads();
7263 
7264 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7265 }
7266 
7267 static void
7268 test_bdev_ctrlr_op_rpc(void)
7269 {
7270 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
7271 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
7272 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7273 	struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL;
7274 	struct nvme_path_id *curr_trid1, *curr_trid2;
7275 	struct spdk_io_channel *ch11, *ch12, *ch21, *ch22;
7276 	struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22;
7277 	int ctrlr_op_rc;
7278 	int rc;
7279 
7280 	ut_init_trid(&trid1);
7281 	ut_init_trid2(&trid2);
7282 	TAILQ_INIT(&ctrlr1.active_io_qpairs);
7283 	TAILQ_INIT(&ctrlr2.active_io_qpairs);
7284 	ctrlr1.cdata.cmic.multi_ctrlr = 1;
7285 	ctrlr2.cdata.cmic.multi_ctrlr = 1;
7286 	ctrlr1.cdata.cntlid = 1;
7287 	ctrlr2.cdata.cntlid = 2;
7288 	ctrlr1.adminq.is_connected = true;
7289 	ctrlr2.adminq.is_connected = true;
7290 
7291 	set_thread(0);
7292 
7293 	rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL);
7294 	CU_ASSERT(rc == 0);
7295 
7296 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7297 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7298 
7299 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN);
7300 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
7301 
7302 	curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
7303 	SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL);
7304 
7305 	ch11 = spdk_get_io_channel(nvme_ctrlr1);
7306 	SPDK_CU_ASSERT_FATAL(ch11 != NULL);
7307 
7308 	ctrlr_ch11 = spdk_io_channel_get_ctx(ch11);
7309 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7310 
7311 	set_thread(1);
7312 
7313 	ch12 = spdk_get_io_channel(nvme_ctrlr1);
7314 	SPDK_CU_ASSERT_FATAL(ch12 != NULL);
7315 
7316 	ctrlr_ch12 = spdk_io_channel_get_ctx(ch12);
7317 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7318 
7319 	set_thread(0);
7320 
7321 	rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL);
7322 	CU_ASSERT(rc == 0);
7323 
7324 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN);
7325 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
7326 
7327 	curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
7328 	SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL);
7329 
7330 	ch21 = spdk_get_io_channel(nvme_ctrlr2);
7331 	SPDK_CU_ASSERT_FATAL(ch21 != NULL);
7332 
7333 	ctrlr_ch21 = spdk_io_channel_get_ctx(ch21);
7334 	CU_ASSERT(ctrlr_ch21->qpair != NULL);
7335 
7336 	set_thread(1);
7337 
7338 	ch22 = spdk_get_io_channel(nvme_ctrlr2);
7339 	SPDK_CU_ASSERT_FATAL(ch22 != NULL);
7340 
7341 	ctrlr_ch22 = spdk_io_channel_get_ctx(ch22);
7342 	CU_ASSERT(ctrlr_ch22->qpair != NULL);
7343 
7344 	/* Reset starts from thread 1. */
7345 	set_thread(1);
7346 
7347 	nvme_ctrlr1->resetting = false;
7348 	nvme_ctrlr2->resetting = false;
7349 	curr_trid1->last_failed_tsc = spdk_get_ticks();
7350 	curr_trid2->last_failed_tsc = spdk_get_ticks();
7351 	ctrlr_op_rc = -1;
7352 
7353 	nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET,
7354 			       ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
7355 
7356 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7357 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
7358 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
7359 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7360 
7361 	poll_thread_times(0, 3);
7362 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7363 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7364 
7365 	poll_thread_times(0, 1);
7366 	poll_thread_times(1, 1);
7367 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
7368 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7369 
7370 	poll_thread_times(1, 1);
7371 	poll_thread_times(0, 1);
7372 	CU_ASSERT(ctrlr1.adminq.is_connected == false);
7373 
7374 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7375 	poll_thread_times(0, 2);
7376 	CU_ASSERT(ctrlr1.adminq.is_connected == true);
7377 
7378 	poll_thread_times(0, 1);
7379 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7380 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
7381 
7382 	poll_thread_times(1, 1);
7383 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
7384 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
7385 	CU_ASSERT(nvme_ctrlr1->resetting == true);
7386 	CU_ASSERT(curr_trid1->last_failed_tsc != 0);
7387 
7388 	poll_thread_times(0, 2);
7389 	poll_thread_times(1, 1);
7390 	poll_thread_times(0, 1);
7391 	poll_thread_times(1, 1);
7392 	poll_thread_times(0, 1);
7393 	poll_thread_times(1, 1);
7394 	poll_thread_times(0, 1);
7395 
7396 	CU_ASSERT(nvme_ctrlr1->resetting == false);
7397 	CU_ASSERT(curr_trid1->last_failed_tsc == 0);
7398 	CU_ASSERT(nvme_ctrlr2->resetting == true);
7399 
7400 	poll_threads();
7401 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7402 	poll_threads();
7403 
7404 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7405 	CU_ASSERT(ctrlr_op_rc == 0);
7406 
7407 	set_thread(1);
7408 
7409 	spdk_put_io_channel(ch12);
7410 	spdk_put_io_channel(ch22);
7411 
7412 	set_thread(0);
7413 
7414 	spdk_put_io_channel(ch11);
7415 	spdk_put_io_channel(ch21);
7416 
7417 	poll_threads();
7418 
7419 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7420 	CU_ASSERT(rc == 0);
7421 
7422 	poll_threads();
7423 	spdk_delay_us(1000);
7424 	poll_threads();
7425 
7426 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7427 }
7428 
7429 static void
7430 test_disable_enable_ctrlr(void)
7431 {
7432 	struct spdk_nvme_transport_id trid = {};
7433 	struct spdk_nvme_ctrlr ctrlr = {};
7434 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7435 	struct nvme_path_id *curr_trid;
7436 	struct spdk_io_channel *ch1, *ch2;
7437 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7438 	int rc;
7439 
7440 	ut_init_trid(&trid);
7441 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7442 	ctrlr.adminq.is_connected = true;
7443 
7444 	set_thread(0);
7445 
7446 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7447 	CU_ASSERT(rc == 0);
7448 
7449 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7450 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7451 
7452 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7453 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7454 
7455 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7456 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7457 
7458 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7459 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7460 
7461 	set_thread(1);
7462 
7463 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7464 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7465 
7466 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7467 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7468 
7469 	/* Disable starts from thread 1. */
7470 	set_thread(1);
7471 
7472 	/* Case 1: ctrlr is already disabled. */
7473 	nvme_ctrlr->disabled = true;
7474 
7475 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7476 	CU_ASSERT(rc == -EALREADY);
7477 
7478 	/* Case 2: ctrlr is already being destructed. */
7479 	nvme_ctrlr->disabled = false;
7480 	nvme_ctrlr->destruct = true;
7481 
7482 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7483 	CU_ASSERT(rc == -ENXIO);
7484 
7485 	/* Case 3: reset is in progress. */
7486 	nvme_ctrlr->destruct = false;
7487 	nvme_ctrlr->resetting = true;
7488 
7489 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7490 	CU_ASSERT(rc == -EBUSY);
7491 
7492 	/* Case 4: disable completes successfully. */
7493 	nvme_ctrlr->resetting = false;
7494 
7495 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7496 	CU_ASSERT(rc == 0);
7497 	CU_ASSERT(nvme_ctrlr->resetting == true);
7498 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7499 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7500 
7501 	poll_thread_times(0, 3);
7502 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7503 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7504 
7505 	poll_thread_times(0, 1);
7506 	poll_thread_times(1, 1);
7507 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7508 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7509 
7510 	poll_thread_times(1, 1);
7511 	poll_thread_times(0, 1);
7512 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7513 	poll_thread_times(1, 1);
7514 	poll_thread_times(0, 1);
7515 	poll_thread_times(1, 1);
7516 	poll_thread_times(0, 1);
7517 	CU_ASSERT(nvme_ctrlr->resetting == false);
7518 	CU_ASSERT(nvme_ctrlr->disabled == true);
7519 
7520 	/* Case 5: enable completes successfully. */
7521 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7522 	CU_ASSERT(rc == 0);
7523 
7524 	CU_ASSERT(nvme_ctrlr->resetting == true);
7525 	CU_ASSERT(nvme_ctrlr->disabled == false);
7526 
7527 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7528 	poll_thread_times(0, 2);
7529 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7530 
7531 	poll_thread_times(0, 1);
7532 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7533 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7534 
7535 	poll_thread_times(1, 1);
7536 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7537 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7538 	CU_ASSERT(nvme_ctrlr->resetting == true);
7539 
7540 	poll_thread_times(0, 2);
7541 	CU_ASSERT(nvme_ctrlr->resetting == true);
7542 	poll_thread_times(1, 1);
7543 	CU_ASSERT(nvme_ctrlr->resetting == true);
7544 	poll_thread_times(0, 1);
7545 	CU_ASSERT(nvme_ctrlr->resetting == false);
7546 
7547 	/* Case 6: ctrlr is already enabled. */
7548 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7549 	CU_ASSERT(rc == -EALREADY);
7550 
7551 	set_thread(0);
7552 
7553 	/* Case 7: disable cancels delayed reconnect. */
7554 	nvme_ctrlr->opts.reconnect_delay_sec = 10;
7555 	ctrlr.fail_reset = true;
7556 
7557 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7558 	CU_ASSERT(rc == 0);
7559 
7560 	poll_threads();
7561 
7562 	CU_ASSERT(nvme_ctrlr->resetting == false);
7563 	CU_ASSERT(ctrlr.is_failed == false);
7564 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7565 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7566 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
7567 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
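
	/* The failed reset armed a delayed reconnect (reconnect_delay_sec is 10),
	 * so the disable below must cancel the timer rather than wait for it to
	 * fire.
	 */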
7568 
7569 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7570 	CU_ASSERT(rc == 0);
7571 
7572 	CU_ASSERT(nvme_ctrlr->resetting == true);
7573 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
7574 
7575 	poll_threads();
7576 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7577 	poll_threads();
7578 
7579 	CU_ASSERT(nvme_ctrlr->resetting == false);
7580 	CU_ASSERT(nvme_ctrlr->disabled == true);
7581 
7582 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7583 	CU_ASSERT(rc == 0);
7584 
7585 	CU_ASSERT(nvme_ctrlr->resetting == true);
7586 	CU_ASSERT(nvme_ctrlr->disabled == false);
7587 
7588 	poll_threads();
7589 
7590 	CU_ASSERT(nvme_ctrlr->resetting == false);
7591 
7592 	set_thread(1);
7593 
7594 	spdk_put_io_channel(ch2);
7595 
7596 	set_thread(0);
7597 
7598 	spdk_put_io_channel(ch1);
7599 
7600 	poll_threads();
7601 
7602 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7603 	CU_ASSERT(rc == 0);
7604 
7605 	poll_threads();
7606 	spdk_delay_us(1000);
7607 	poll_threads();
7608 
7609 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7610 }
7611 
7612 static void
7613 ut_delete_done(void *ctx, int rc)
7614 {
7615 	int *delete_done_rc = ctx;
7616 	*delete_done_rc = rc;
7617 }
7618 
7619 static void
7620 test_delete_ctrlr_done(void)
7621 {
7622 	struct spdk_nvme_transport_id trid = {};
7623 	struct spdk_nvme_ctrlr ctrlr = {};
7624 	int delete_done_rc = 0xDEADBEEF;
7625 	int rc;
7626 
7627 	ut_init_trid(&trid);
7628 
7629 	nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7630 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
7631 
7632 	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
7633 	CU_ASSERT(rc == 0);
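
	/* Deletion completes asynchronously. Poll until the ut_delete_done
	 * callback fires, advancing simulated time by up to 20 ms in total.
	 */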
7634 
7635 	for (int i = 0; i < 20; i++) {
7636 		poll_threads();
7637 		if (delete_done_rc == 0) {
7638 			break;
7639 		}
7640 		spdk_delay_us(1000);
7641 	}
7642 
7643 	CU_ASSERT(delete_done_rc == 0);
7644 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7645 }
7646 
7647 static void
7648 test_ns_remove_during_reset(void)
7649 {
7650 	struct nvme_path_id path = {};
7651 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
7652 	struct spdk_nvme_ctrlr *ctrlr;
7653 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7654 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
7655 	struct nvme_ctrlr *nvme_ctrlr;
7656 	const int STRING_SIZE = 32;
7657 	const char *attached_names[STRING_SIZE];
7658 	struct nvme_bdev *bdev;
7659 	struct nvme_ns *nvme_ns;
7660 	union spdk_nvme_async_event_completion event = {};
7661 	struct spdk_nvme_cpl cpl = {};
7662 	int rc;
7663 
7664 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
7665 	ut_init_trid(&path.trid);
7666 
7667 	set_thread(0);
7668 
7669 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
7670 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7671 
7672 	g_ut_attach_ctrlr_status = 0;
7673 	g_ut_attach_bdev_count = 1;
7674 
7675 	opts.multipath = false;
7676 
7677 	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
7678 				   attach_ctrlr_done, NULL, &dopts, &opts);
7679 	CU_ASSERT(rc == 0);
7680 
7681 	spdk_delay_us(1000);
7682 	poll_threads();
7683 
7684 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
7685 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
7686 
7687 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
7688 	CU_ASSERT(nvme_ctrlr != NULL);
7689 
7690 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
7691 	CU_ASSERT(bdev != NULL);
7692 
7693 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
7694 	CU_ASSERT(nvme_ns != NULL);
7695 
7696 	/* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist,
7697 	 * but nvme_ns->ns should be NULL.
7698 	 */
7699 
7700 	CU_ASSERT(ctrlr->ns[0].is_active == true);
7701 	ctrlr->ns[0].is_active = false;
7702 
7703 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7704 	CU_ASSERT(rc == 0);
7705 
7706 	poll_threads();
7707 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7708 	poll_threads();
7709 
7710 	CU_ASSERT(nvme_ctrlr->resetting == false);
7711 	CU_ASSERT(ctrlr->adminq.is_connected == true);
7712 
7713 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7714 	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7715 	CU_ASSERT(nvme_ns->bdev == bdev);
7716 	CU_ASSERT(nvme_ns->ns == NULL);
7717 
	/* Then, an async event should fill nvme_ns->ns again. */
7719 
7720 	ctrlr->ns[0].is_active = true;
7721 
7722 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
7723 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
7724 	cpl.cdw0 = event.raw;
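
	/* The Namespace Attribute Changed notice arrives as an AER completion:
	 * the async event type/info pair is packed into cdw0, which aer_cb()
	 * decodes before rescanning the namespaces.
	 */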
7725 
7726 	aer_cb(nvme_ctrlr, &cpl);
7727 
7728 	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
7729 	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
7730 	CU_ASSERT(nvme_ns->bdev == bdev);
7731 	CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]);
7732 
7733 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7734 	CU_ASSERT(rc == 0);
7735 
7736 	poll_threads();
7737 	spdk_delay_us(1000);
7738 	poll_threads();
7739 
7740 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7741 }
7742 
7743 static void
7744 test_io_path_is_current(void)
7745 {
7746 	struct nvme_bdev_channel nbdev_ch = {
7747 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
7748 	};
7749 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
7750 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
7751 	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
7752 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, },
7753 	nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
7754 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {};
7755 	struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
7756 	struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
7757 	struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, };
7758 	struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, };
7759 	struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, };
7760 	struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, };
7761 	struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
7762 	struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
7763 	struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
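
	/* A condensed statement of the rules exercised below: an io_path is
	 * "current" only if it is attached to a channel and its qpair is
	 * connected; then, for active/active, if it is ANA optimized (or
	 * non-optimized when no optimized path exists), and, for active/passive,
	 * if it is the cached current_io_path (or the would-be first choice when
	 * nothing is cached).
	 */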
7764 
	/* io_path1 is being deleted. */
7766 	io_path1.nbdev_ch = NULL;
7767 
7768 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == false);
7769 
7770 	io_path1.nbdev_ch = &nbdev_ch;
7771 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
7772 	io_path2.nbdev_ch = &nbdev_ch;
7773 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
7774 	io_path3.nbdev_ch = &nbdev_ch;
7775 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
7776 
7777 	/* active/active: io_path is current if it is available and ANA optimized. */
7778 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7779 
7780 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7781 
7782 	/* active/active: io_path is not current if it is disconnected even if it is
7783 	 * ANA optimized.
7784 	 */
7785 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7786 
7787 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7788 
7789 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7790 
	/* active/passive: io_path is current if it is available and cached.
	 * (Only an ANA optimized path is cached for active/passive.)
	 */
7794 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7795 	nbdev_ch.current_io_path = &io_path2;
7796 
7797 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7798 
	/* active/passive: io_path is not current if it is disconnected even if it is cached. */
7800 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7801 
7802 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7803 
7804 	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
7805 
7806 	/* active/active and active/passive: io_path is not current if it is ANA inaccessible. */
7807 	nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
7808 
7809 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7810 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7811 
7812 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7813 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7814 
7815 	/* active/active: non-optimized path is current only if there is no optimized path. */
7816 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
7817 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7818 
7819 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7820 
7821 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7822 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
7823 
7824 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);
7825 
	/* active/passive: an io_path is current if it is the first one when
	 * there is no optimized path.
	 */
7827 	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
7828 	nbdev_ch.current_io_path = NULL;
7829 
7830 	CU_ASSERT(nvme_io_path_is_current(&io_path1) == true);
7831 	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
7832 	CU_ASSERT(nvme_io_path_is_current(&io_path3) == false);
7833 }
7834 
7835 static void
7836 test_bdev_reset_abort_io(void)
7837 {
7838 	struct spdk_nvme_transport_id trid = {};
7839 	struct spdk_bdev_nvme_ctrlr_opts opts = {};
7840 	struct spdk_nvme_ctrlr *ctrlr;
7841 	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
7842 	struct nvme_ctrlr *nvme_ctrlr;
7843 	const int STRING_SIZE = 32;
7844 	const char *attached_names[STRING_SIZE];
7845 	struct nvme_bdev *bdev;
7846 	struct spdk_bdev_io *write_io, *read_io, *reset_io;
7847 	struct spdk_io_channel *ch1, *ch2;
7848 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
7849 	struct nvme_io_path *io_path1, *io_path2;
7850 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
7851 	int rc;
7852 
7853 	g_opts.bdev_retry_count = -1;
7854 
7855 	ut_init_trid(&trid);
7856 
7857 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
7858 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
7859 
7860 	g_ut_attach_ctrlr_status = 0;
7861 	g_ut_attach_bdev_count = 1;
7862 
7863 	set_thread(1);
7864 
7865 	opts.ctrlr_loss_timeout_sec = -1;
7866 	opts.reconnect_delay_sec = 1;
7867 	opts.multipath = false;
7868 
7869 	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
7870 				   attach_ctrlr_done, NULL, &dopts, &opts);
7871 	CU_ASSERT(rc == 0);
7872 
7873 	spdk_delay_us(1000);
7874 	poll_threads();
7875 
7876 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7877 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7878 
7879 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
7880 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
7881 
7882 	set_thread(0);
7883 
7884 	ch1 = spdk_get_io_channel(bdev);
7885 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7886 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
7887 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
7888 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
7889 	nvme_qpair1 = io_path1->qpair;
7890 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
7891 
7892 	set_thread(1);
7893 
7894 	ch2 = spdk_get_io_channel(bdev);
7895 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7896 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
7897 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
7898 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
7899 	nvme_qpair2 = io_path2->qpair;
7900 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
7901 
7902 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch1);
7903 	ut_bdev_io_set_buf(write_io);
7904 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
7905 
7906 	read_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_READ, bdev, ch1);
7907 	ut_bdev_io_set_buf(read_io);
7908 	read_io->internal.ch = (struct spdk_bdev_channel *)ch1;
7909 
7910 	reset_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
7911 
	/* If a qpair is disconnected, it is freed and then reconnected by resetting
	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is
	 * resetting should be queued.
	 */
7916 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
7917 
7918 	poll_thread_times(0, 3);
7919 
7920 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7921 	CU_ASSERT(nvme_ctrlr->resetting == true);
7922 
7923 	set_thread(0);
7924 
7925 	write_io->internal.f.in_submit_request = true;
7926 
7927 	bdev_nvme_submit_request(ch1, write_io);
7928 
7929 	CU_ASSERT(write_io->internal.f.in_submit_request == true);
7930 	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
7931 
7932 	set_thread(1);
7933 
	/* Submit a reset request to the bdev while the nvme_ctrlr is resetting.
	 * Further I/O queueing should be disabled and already queued I/Os should
	 * be aborted. Verify these behaviors.
	 */
7938 	reset_io->internal.f.in_submit_request = true;
7939 
7940 	bdev_nvme_submit_request(ch2, reset_io);
7941 
7942 	poll_thread_times(0, 1);
7943 	poll_thread_times(1, 2);
7944 
7945 	CU_ASSERT(nbdev_ch1->resetting == true);
7946 
	/* qpair1 should still be disconnected. */
7948 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7949 
7950 	set_thread(0);
7951 
7952 	read_io->internal.f.in_submit_request = true;
7953 
7954 	bdev_nvme_submit_request(ch1, read_io);
7955 
7956 	CU_ASSERT(nvme_qpair1->qpair == NULL);
7957 
7958 	poll_thread_times(0, 1);
7959 
7960 	/* The I/O which was submitted during bdev_reset should fail immediately. */
7961 	CU_ASSERT(read_io->internal.f.in_submit_request == false);
7962 	CU_ASSERT(read_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
7963 
7964 	poll_threads();
7965 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7966 	poll_threads();
7967 
7968 	/* The completion of bdev_reset should ensure queued I/O is aborted. */
7969 	CU_ASSERT(write_io->internal.f.in_submit_request == false);
7970 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
7971 
7972 	/* The reset request itself should complete with success. */
7973 	CU_ASSERT(reset_io->internal.f.in_submit_request == false);
7974 	CU_ASSERT(reset_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
7975 
7976 	set_thread(0);
7977 
7978 	spdk_put_io_channel(ch1);
7979 
7980 	set_thread(1);
7981 
7982 	spdk_put_io_channel(ch2);
7983 
7984 	poll_threads();
7985 
7986 	set_thread(0);
7987 
7988 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7989 	CU_ASSERT(rc == 0);
7990 
7991 	poll_threads();
7992 	spdk_delay_us(1000);
7993 	poll_threads();
7994 
7995 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7996 
7997 	free(write_io);
7998 	free(read_io);
7999 	free(reset_io);
8000 
8001 	g_opts.bdev_retry_count = 0;
8002 }
8003 
8004 int
8005 main(int argc, char **argv)
8006 {
8007 	CU_pSuite	suite = NULL;
8008 	unsigned int	num_failures;
8009 
8010 	CU_initialize_registry();
8011 
8012 	suite = CU_add_suite("nvme", NULL, NULL);
8013 
8014 	CU_ADD_TEST(suite, test_create_ctrlr);
8015 	CU_ADD_TEST(suite, test_reset_ctrlr);
8016 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
8017 	CU_ADD_TEST(suite, test_failover_ctrlr);
8018 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
8019 	CU_ADD_TEST(suite, test_pending_reset);
8020 	CU_ADD_TEST(suite, test_attach_ctrlr);
8021 	CU_ADD_TEST(suite, test_aer_cb);
8022 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
8023 	CU_ADD_TEST(suite, test_add_remove_trid);
8024 	CU_ADD_TEST(suite, test_abort);
8025 	CU_ADD_TEST(suite, test_get_io_qpair);
8026 	CU_ADD_TEST(suite, test_bdev_unregister);
8027 	CU_ADD_TEST(suite, test_compare_ns);
8028 	CU_ADD_TEST(suite, test_init_ana_log_page);
8029 	CU_ADD_TEST(suite, test_get_memory_domains);
8030 	CU_ADD_TEST(suite, test_reconnect_qpair);
8031 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
8032 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
8033 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
8034 	CU_ADD_TEST(suite, test_admin_path);
8035 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
8036 	CU_ADD_TEST(suite, test_find_io_path);
8037 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
8038 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
8039 	CU_ADD_TEST(suite, test_retry_io_count);
8040 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
8041 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
8042 	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
8043 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
8044 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
8045 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
8046 	CU_ADD_TEST(suite, test_fail_path);
8047 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
8048 	CU_ADD_TEST(suite, test_ana_transition);
8049 	CU_ADD_TEST(suite, test_set_preferred_path);
8050 	CU_ADD_TEST(suite, test_find_next_io_path);
8051 	CU_ADD_TEST(suite, test_find_io_path_min_qd);
8052 	CU_ADD_TEST(suite, test_disable_auto_failback);
8053 	CU_ADD_TEST(suite, test_set_multipath_policy);
8054 	CU_ADD_TEST(suite, test_uuid_generation);
8055 	CU_ADD_TEST(suite, test_retry_io_to_same_path);
8056 	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
8057 	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
8058 	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
8059 	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
8060 	CU_ADD_TEST(suite, test_delete_ctrlr_done);
8061 	CU_ADD_TEST(suite, test_ns_remove_during_reset);
8062 	CU_ADD_TEST(suite, test_io_path_is_current);
8063 	CU_ADD_TEST(suite, test_bdev_reset_abort_io);
8064 
8065 	allocate_threads(3);
8066 	set_thread(0);
8067 	bdev_nvme_library_init();
8068 	init_accel();
8069 
8070 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
8071 
8072 	set_thread(0);
8073 	bdev_nvme_library_fini();
8074 	fini_accel();
8075 	free_threads();
8076 
8077 	CU_cleanup_registry();
8078 
8079 	return num_failures;
8080 }
8081