/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

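/* Token io_device pointer standing in for the accel framework;
 * spdk_accel_get_io_channel() below resolves a channel against it.
 */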
static void *g_accel_p = (void *)0xdeadbeaf;

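/* Most NVMe driver and JSON-RPC entry points are irrelevant to the logic
 * under test, so they are stubbed out with canned return values.
 */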
DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));

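/* Mocked by hand rather than via DEFINE_STUB so tests can both set the return
 * value with MOCK_SET() and receive dummy memory-domain pointers in the array.
 */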
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));

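/* Minimal stand-ins for the opaque NVMe driver structures. Only the fields
 * that the mocked API below actually touches are modeled.
 */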
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint8_t				failure_reason;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	bool				is_removed;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

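/* Fake probe/attach state: ctrlrs created by ut_attach_ctrlr() wait on
 * g_ut_init_ctrlrs until spdk_nvme_probe_poll_async() moves them to
 * g_ut_attached_ctrlrs.
 */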
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

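/* Create a fake ctrlr with num_ns active namespaces and queue it on the init
 * list for probing. Returns NULL if a ctrlr with the same trid is already
 * queued or on allocation failure.
 */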
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

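/* Queue a request on the qpair. It completes with a success status the next
 * time spdk_nvme_qpair_process_completions() runs, unless a test rewrites
 * req->cpl first.
 */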
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

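/* Complete the fake attach: apply default (or caller-provided) ctrlr opts,
 * move the ctrlr to the attached list, and invoke the attach callback.
 */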
static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		/* Free the namespace arrays too so the failure path does not leak. */
		free(ctrlr->nsdata);
		free(ctrlr->ns);
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

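/* Each fake ANA descriptor covers exactly one namespace, so its size is the
 * descriptor header plus a single uint32_t NSID entry.
 */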
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

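/* Fake abort: mark the target request so it completes as aborted-by-request,
 * and queue the abort command itself on the admin queue for later completion.
 */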
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_read_ext_called;
int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_read_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_write_ext_called;
int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_write_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

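/* Unlike the real driver, this fake completes every outstanding request
 * synchronously, which lets tests drive completions deterministically.
 */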
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

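/* Test cases start here. Each test drives bdev_nvme's asynchronous state
 * machines with the poll_threads()/poll_thread_times() helpers from
 * ut_multithread.c.
 */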
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

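	/* Step through the reset sequence: the I/O qpairs are deleted thread by
	 * thread, the admin qpair is reconnected, new I/O qpairs are created,
	 * and finally last_failed_tsc and the resetting flag are cleared.
	 */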
	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* The reset completed, but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We hit a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
 * trid1 was lost, and ctrlr reset failed repeatedly before failover from trid1 to
 * trid2 started. While the failed reset was being processed, trid3 was added. trid1
 * should have remained active, i.e., the head of the list, until the failover
 * completed. However, trid3 was inserted at the head of the list by mistake.
 *
 * I/O qpairs have a shorter polling period than the admin qpair. When a connection
 * fails, an I/O qpair may detect the error earlier than the admin qpair. An I/O
 * qpair error triggers ctrlr reset, whereas an admin qpair error triggers ctrlr
 * failover. Hence ctrlr reset may be executed repeatedly before failover is
 * executed, so this bug is real.
 *
 * The following test verifies the fix.
 */
1782 static void
1783 test_race_between_failover_and_add_secondary_trid(void)
1784 {
1785 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1786 	struct spdk_nvme_ctrlr ctrlr = {};
1787 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1788 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1789 	struct spdk_io_channel *ch1, *ch2;
1790 	int rc;
1791 
1792 	ut_init_trid(&trid1);
1793 	ut_init_trid2(&trid2);
1794 	ut_init_trid3(&trid3);
1795 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1796 
1797 	set_thread(0);
1798 
1799 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1800 	CU_ASSERT(rc == 0);
1801 
1802 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1803 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1804 
1805 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1806 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1807 
1808 	set_thread(1);
1809 
1810 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1811 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1812 
1813 	set_thread(0);
1814 
1815 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1816 	CU_ASSERT(rc == 0);
1817 
1818 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1819 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1820 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1821 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1822 	path_id2 = TAILQ_NEXT(path_id1, link);
1823 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1824 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1825 
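	/* Make ctrlr resets fail, emulating the repeated failed resets that precede
	 * the failover in the original bug report.
	 */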
1826 	ctrlr.fail_reset = true;
1827 
1828 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1829 	CU_ASSERT(rc == 0);
1830 
1831 	poll_threads();
1832 
1833 	CU_ASSERT(path_id1->last_failed_tsc != 0);
1834 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1835 
1836 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
1837 	CU_ASSERT(rc == 0);
1838 
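	/* Add trid3 while the failed reset is still being processed. The fix under
	 * test requires that trid3 be appended after trid2 rather than inserted at
	 * the head of the list.
	 */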
1839 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1840 	CU_ASSERT(rc == 0);
1841 
1842 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1843 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1844 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1845 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1846 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1847 	path_id3 = TAILQ_NEXT(path_id2, link);
1848 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1849 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1850 
1851 	poll_threads();
1852 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1853 	poll_threads();
1854 
1855 	spdk_put_io_channel(ch1);
1856 
1857 	set_thread(1);
1858 
1859 	spdk_put_io_channel(ch2);
1860 
1861 	poll_threads();
1862 
1863 	set_thread(0);
1864 
1865 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1866 	CU_ASSERT(rc == 0);
1867 
1868 	poll_threads();
1869 	spdk_delay_us(1000);
1870 	poll_threads();
1871 
1872 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1873 }
1874 
1875 static void
1876 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1877 {
1878 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1879 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1880 }
1881 
1882 static void
1883 test_pending_reset(void)
1884 {
1885 	struct spdk_nvme_transport_id trid = {};
1886 	struct spdk_nvme_ctrlr *ctrlr;
1887 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1888 	const int STRING_SIZE = 32;
1889 	const char *attached_names[STRING_SIZE];
1890 	struct nvme_bdev *bdev;
1891 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1892 	struct spdk_io_channel *ch1, *ch2;
1893 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1894 	struct nvme_io_path *io_path1, *io_path2;
1895 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1896 	int rc;
1897 
1898 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1899 	ut_init_trid(&trid);
1900 
1901 	set_thread(0);
1902 
1903 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1904 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1905 
1906 	g_ut_attach_ctrlr_status = 0;
1907 	g_ut_attach_bdev_count = 1;
1908 
1909 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1910 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1911 	CU_ASSERT(rc == 0);
1912 
1913 	spdk_delay_us(1000);
1914 	poll_threads();
1915 
1916 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1917 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1918 
1919 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1920 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1921 
1922 	ch1 = spdk_get_io_channel(bdev);
1923 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1924 
1925 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1926 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1927 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1928 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1929 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1930 
1931 	set_thread(1);
1932 
1933 	ch2 = spdk_get_io_channel(bdev);
1934 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1935 
1936 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1937 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1938 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1939 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1940 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1941 
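	/* Pre-set both reset I/Os to FAILED so that the assertions below can tell
	 * whether the reset path actually completed them.
	 */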
1942 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1943 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1944 
1945 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1946 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1947 
1948 	/* The first reset request is submitted on thread 1, and the second reset request
1949 	 * is submitted on thread 0 while the first request is being processed.
1950 	 */
1951 	bdev_nvme_submit_request(ch2, first_bdev_io);
1952 	CU_ASSERT(nvme_ctrlr->resetting == true);
1953 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1954 
1955 	set_thread(0);
1956 
1957 	bdev_nvme_submit_request(ch1, second_bdev_io);
1958 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1959 
1960 	poll_threads();
1961 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1962 	poll_threads();
1963 
1964 	CU_ASSERT(nvme_ctrlr->resetting == false);
1965 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1966 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1967 
1968 	/* The first reset request is submitted on thread 1, and the second reset request
1969 	 * is submitted on thread 0 while the first request is being processed.
1970 	 *
1971 	 * The difference from the above scenario is that the reset fails while the
1972 	 * first request is being processed. Hence both reset requests should fail.
1973 	 */
1974 	set_thread(1);
1975 
1976 	bdev_nvme_submit_request(ch2, first_bdev_io);
1977 	CU_ASSERT(nvme_ctrlr->resetting == true);
1978 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1979 
1980 	set_thread(0);
1981 
1982 	bdev_nvme_submit_request(ch1, second_bdev_io);
1983 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1984 
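	/* Make the in-flight reset fail this time. */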
1985 	ctrlr->fail_reset = true;
1986 
1987 	poll_threads();
1988 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1989 	poll_threads();
1990 
1991 	CU_ASSERT(nvme_ctrlr->resetting == false);
1992 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1993 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1994 
1995 	spdk_put_io_channel(ch1);
1996 
1997 	set_thread(1);
1998 
1999 	spdk_put_io_channel(ch2);
2000 
2001 	poll_threads();
2002 
2003 	set_thread(0);
2004 
2005 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2006 	CU_ASSERT(rc == 0);
2007 
2008 	poll_threads();
2009 	spdk_delay_us(1000);
2010 	poll_threads();
2011 
2012 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2013 
2014 	free(first_bdev_io);
2015 	free(second_bdev_io);
2016 }
2017 
2018 static void
2019 test_attach_ctrlr(void)
2020 {
2021 	struct spdk_nvme_transport_id trid = {};
2022 	struct spdk_nvme_ctrlr *ctrlr;
2023 	struct nvme_ctrlr *nvme_ctrlr;
2024 	const int STRING_SIZE = 32;
2025 	const char *attached_names[STRING_SIZE];
2026 	struct nvme_bdev *nbdev;
2027 	int rc;
2028 
2029 	set_thread(0);
2030 
2031 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2032 	ut_init_trid(&trid);
2033 
2034 	/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
2035 	 * by probe polling.
2036 	 */
2037 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2038 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2039 
2040 	ctrlr->is_failed = true;
2041 	g_ut_attach_ctrlr_status = -EIO;
2042 	g_ut_attach_bdev_count = 0;
2043 
2044 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2045 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2046 	CU_ASSERT(rc == 0);
2047 
2048 	spdk_delay_us(1000);
2049 	poll_threads();
2050 
2051 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2052 
2053 	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created. */
2054 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2055 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2056 
2057 	g_ut_attach_ctrlr_status = 0;
2058 
2059 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2060 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2061 	CU_ASSERT(rc == 0);
2062 
2063 	spdk_delay_us(1000);
2064 	poll_threads();
2065 
2066 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2067 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2068 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2069 
2070 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2071 	CU_ASSERT(rc == 0);
2072 
2073 	poll_threads();
2074 	spdk_delay_us(1000);
2075 	poll_threads();
2076 
2077 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2078 
2079 	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
2080 	 * one nvme_bdev are created.
2081 	 */
2082 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2083 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2084 
2085 	g_ut_attach_bdev_count = 1;
2086 
2087 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2088 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2089 	CU_ASSERT(rc == 0);
2090 
2091 	spdk_delay_us(1000);
2092 	poll_threads();
2093 
2094 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2095 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2096 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2097 
2098 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2099 	attached_names[0] = NULL;
2100 
2101 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2102 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2103 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2104 
2105 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2106 	CU_ASSERT(rc == 0);
2107 
2108 	poll_threads();
2109 	spdk_delay_us(1000);
2110 	poll_threads();
2111 
2112 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2113 
2114 	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespace is
2115 	 * created because creating the nvme_bdev failed.
2116 	 */
2117 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2118 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2119 
2120 	g_ut_register_bdev_status = -EINVAL;
2121 	g_ut_attach_bdev_count = 0;
2122 
2123 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2124 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2125 	CU_ASSERT(rc == 0);
2126 
2127 	spdk_delay_us(1000);
2128 	poll_threads();
2129 
2130 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2131 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2132 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2133 
2134 	CU_ASSERT(attached_names[0] == NULL);
2135 
2136 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2137 	CU_ASSERT(rc == 0);
2138 
2139 	poll_threads();
2140 	spdk_delay_us(1000);
2141 	poll_threads();
2142 
2143 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2144 
2145 	g_ut_register_bdev_status = 0;
2146 }
2147 
2148 static void
2149 test_aer_cb(void)
2150 {
2151 	struct spdk_nvme_transport_id trid = {};
2152 	struct spdk_nvme_ctrlr *ctrlr;
2153 	struct nvme_ctrlr *nvme_ctrlr;
2154 	struct nvme_bdev *bdev;
2155 	const int STRING_SIZE = 32;
2156 	const char *attached_names[STRING_SIZE];
2157 	union spdk_nvme_async_event_completion event = {};
2158 	struct spdk_nvme_cpl cpl = {};
2159 	int rc;
2160 
2161 	set_thread(0);
2162 
2163 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2164 	ut_init_trid(&trid);
2165 
2166 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd,
2167 	 * and 4th namespaces are populated.
2168 	 */
2169 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2170 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2171 
2172 	ctrlr->ns[0].is_active = false;
2173 
2174 	g_ut_attach_ctrlr_status = 0;
2175 	g_ut_attach_bdev_count = 3;
2176 
2177 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2178 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2179 	CU_ASSERT(rc == 0);
2180 
2181 	spdk_delay_us(1000);
2182 	poll_threads();
2183 
2184 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2185 	poll_threads();
2186 
2187 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2188 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2189 
2190 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2191 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2192 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2193 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2194 
2195 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2196 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2197 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2198 
2199 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace,
2200 	 * and change the size of the 4th namespace.
2201 	 */
2202 	ctrlr->ns[0].is_active = true;
2203 	ctrlr->ns[2].is_active = false;
2204 	ctrlr->nsdata[3].nsze = 2048;
2205 
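	/* Deliver a Namespace Attribute Changed AER to pick up the changes above. */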
2206 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2207 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2208 	cpl.cdw0 = event.raw;
2209 
2210 	aer_cb(nvme_ctrlr, &cpl);
2211 
2212 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2213 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2214 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2215 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2216 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2217 
2218 	/* Change ANA state of active namespaces. */
2219 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2220 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2221 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2222 
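	/* Deliver an ANA Change AER; the updated ANA states are read back from the
	 * ANA log page.
	 */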
2223 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2224 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2225 	cpl.cdw0 = event.raw;
2226 
2227 	aer_cb(nvme_ctrlr, &cpl);
2228 
2229 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2230 	poll_threads();
2231 
2232 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2233 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2234 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2235 
2236 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2237 	CU_ASSERT(rc == 0);
2238 
2239 	poll_threads();
2240 	spdk_delay_us(1000);
2241 	poll_threads();
2242 
2243 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2244 }
2245 
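/* Helper: submit an I/O of the given type and verify that exactly one request
 * becomes outstanding on the qpair and then completes successfully when polled.
 */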
2246 static void
2247 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2248 			enum spdk_bdev_io_type io_type)
2249 {
2250 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2251 	struct nvme_io_path *io_path;
2252 	struct spdk_nvme_qpair *qpair;
2253 
2254 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2255 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2256 	qpair = io_path->qpair->qpair;
2257 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2258 
2259 	bdev_io->type = io_type;
2260 	bdev_io->internal.in_submit_request = true;
2261 
2262 	bdev_nvme_submit_request(ch, bdev_io);
2263 
2264 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2265 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2266 
2267 	poll_threads();
2268 
2269 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2270 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2271 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2272 }
2273 
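/* Helper: submit an I/O type that bdev_nvme completes inline, verifying that no
 * request is placed on the qpair.
 */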
2274 static void
2275 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2276 		   enum spdk_bdev_io_type io_type)
2277 {
2278 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2279 	struct nvme_io_path *io_path;
2280 	struct spdk_nvme_qpair *qpair;
2281 
2282 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2283 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2284 	qpair = io_path->qpair->qpair;
2285 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2286 
2287 	bdev_io->type = io_type;
2288 	bdev_io->internal.in_submit_request = true;
2289 
2290 	bdev_nvme_submit_request(ch, bdev_io);
2291 
2292 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2293 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2294 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2295 }
2296 
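/* Helper: submit a compare-and-write and verify that it is issued as two fused
 * requests, with the compare operation first.
 */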
2297 static void
2298 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2299 {
2300 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2301 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2302 	struct ut_nvme_req *req;
2303 	struct nvme_io_path *io_path;
2304 	struct spdk_nvme_qpair *qpair;
2305 
2306 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2307 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2308 	qpair = io_path->qpair->qpair;
2309 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2310 
2311 	/* Compare-and-write is the only fused command supported for now. */
2312 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2313 	bdev_io->internal.in_submit_request = true;
2314 
2315 	bdev_nvme_submit_request(ch, bdev_io);
2316 
2317 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2318 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2319 	CU_ASSERT(bio->first_fused_submitted == true);
2320 
2321 	/* First outstanding request is compare operation. */
2322 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2323 	SPDK_CU_ASSERT_FATAL(req != NULL);
2324 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2325 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2326 
2327 	poll_threads();
2328 
2329 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2330 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2331 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2332 }
2333 
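/* Helper: submit an NVMe admin passthrough command and verify that it is executed
 * on the admin qpair and that its completion is delivered back to the submitting
 * thread.
 */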
2334 static void
2335 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2336 			 struct spdk_nvme_ctrlr *ctrlr)
2337 {
2338 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2339 	bdev_io->internal.in_submit_request = true;
2340 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2341 
2342 	bdev_nvme_submit_request(ch, bdev_io);
2343 
2344 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2345 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2346 
2347 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2348 	poll_thread_times(1, 1);
2349 
2350 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2351 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2352 
2353 	poll_thread_times(0, 1);
2354 
2355 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2356 }
2357 
2358 static void
2359 test_submit_nvme_cmd(void)
2360 {
2361 	struct spdk_nvme_transport_id trid = {};
2362 	struct spdk_nvme_ctrlr *ctrlr;
2363 	struct nvme_ctrlr *nvme_ctrlr;
2364 	const int STRING_SIZE = 32;
2365 	const char *attached_names[STRING_SIZE];
2366 	struct nvme_bdev *bdev;
2367 	struct spdk_bdev_io *bdev_io;
2368 	struct spdk_io_channel *ch;
2369 	int rc;
2370 
2371 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2372 	ut_init_trid(&trid);
2373 
2374 	set_thread(1);
2375 
2376 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2377 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2378 
2379 	g_ut_attach_ctrlr_status = 0;
2380 	g_ut_attach_bdev_count = 1;
2381 
2382 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2383 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2384 	CU_ASSERT(rc == 0);
2385 
2386 	spdk_delay_us(1000);
2387 	poll_threads();
2388 
2389 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2390 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2391 
2392 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2393 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2394 
2395 	set_thread(0);
2396 
2397 	ch = spdk_get_io_channel(bdev);
2398 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2399 
2400 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2401 
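	/* Submit a read without a data buffer first; this exercises the path where
	 * bdev_nvme obtains a buffer via spdk_bdev_io_get_buf() before issuing the
	 * command.
	 */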
2402 	bdev_io->u.bdev.iovs = NULL;
2403 
2404 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2405 
2406 	ut_bdev_io_set_buf(bdev_io);
2407 
2408 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2409 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2410 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2411 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2412 
2413 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2414 
2415 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2416 
2417 	/* Verify that the ext NVME API is called when data is described by a memory domain. */
2418 	g_ut_read_ext_called = false;
2419 	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
2420 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2421 	CU_ASSERT(g_ut_read_ext_called == true);
2422 	g_ut_read_ext_called = false;
2423 	bdev_io->u.bdev.memory_domain = NULL;
2424 
2425 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2426 
2427 	free(bdev_io);
2428 
2429 	spdk_put_io_channel(ch);
2430 
2431 	poll_threads();
2432 
2433 	set_thread(1);
2434 
2435 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2436 	CU_ASSERT(rc == 0);
2437 
2438 	poll_threads();
2439 	spdk_delay_us(1000);
2440 	poll_threads();
2441 
2442 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2443 }
2444 
2445 static void
2446 test_add_remove_trid(void)
2447 {
2448 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2449 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2450 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2451 	const int STRING_SIZE = 32;
2452 	const char *attached_names[STRING_SIZE];
2453 	struct nvme_path_id *ctrid;
2454 	int rc;
2455 
2456 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2457 	ut_init_trid(&path1.trid);
2458 	ut_init_trid2(&path2.trid);
2459 	ut_init_trid3(&path3.trid);
2460 
2461 	set_thread(0);
2462 
2463 	g_ut_attach_ctrlr_status = 0;
2464 	g_ut_attach_bdev_count = 0;
2465 
2466 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2467 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2468 
2469 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2470 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2471 	CU_ASSERT(rc == 0);
2472 
2473 	spdk_delay_us(1000);
2474 	poll_threads();
2475 
2476 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2477 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2478 
2479 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2480 
2481 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2482 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2483 
2484 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2485 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2486 	CU_ASSERT(rc == 0);
2487 
2488 	spdk_delay_us(1000);
2489 	poll_threads();
2490 
2491 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2492 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2493 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2494 			break;
2495 		}
2496 	}
2497 	CU_ASSERT(ctrid != NULL);
2498 
2499 	/* trid3 is not in the registered list. */
2500 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2501 	CU_ASSERT(rc == -ENXIO);
2502 
2503 	/* trid2 is not used, and simply removed. */
2504 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2505 	CU_ASSERT(rc == 0);
2506 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2507 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2508 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2509 	}
2510 
2511 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2512 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2513 
2514 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2515 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2516 	CU_ASSERT(rc == 0);
2517 
2518 	spdk_delay_us(1000);
2519 	poll_threads();
2520 
2521 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2522 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2523 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2524 			break;
2525 		}
2526 	}
2527 	CU_ASSERT(ctrid != NULL);
2528 
2529 	/* Mark path3 as failed by forcefully setting its last_failed_tsc to a non-zero value.
2530 	 * If we add path2 again, path2 should be inserted between path1 and path3.
2531 	 * Then, remove path2. It is not used and is simply removed.
2532 	 */
2533 	ctrid->last_failed_tsc = spdk_get_ticks() + 1;
2534 
2535 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2536 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2537 
2538 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2539 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2540 	CU_ASSERT(rc == 0);
2541 
2542 	spdk_delay_us(1000);
2543 	poll_threads();
2544 
2545 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2546 
2547 	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
2548 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2549 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);
2550 
2551 	ctrid = TAILQ_NEXT(ctrid, link);
2552 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2553 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);
2554 
2555 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2556 	CU_ASSERT(rc == 0);
2557 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2558 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2559 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2560 	}
2561 
2562 	/* path1 is currently used and path3 is an alternative path.
2563 	 * If we remove path1, the active path changes to path3.
2564 	 */
2565 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
2566 	CU_ASSERT(rc == 0);
2567 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2568 	CU_ASSERT(nvme_ctrlr->resetting == true);
2569 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2570 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2571 	}
2572 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2573 
2574 	poll_threads();
2575 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2576 	poll_threads();
2577 
2578 	CU_ASSERT(nvme_ctrlr->resetting == false);
2579 
2580 	/* path3 is the current and only path. If we remove path3, the corresponding
2581 	 * nvme_ctrlr is removed.
2582 	 */
2583 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2584 	CU_ASSERT(rc == 0);
2585 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2586 
2587 	poll_threads();
2588 	spdk_delay_us(1000);
2589 	poll_threads();
2590 
2591 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2592 
2593 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2594 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2595 
2596 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2597 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2598 	CU_ASSERT(rc == 0);
2599 
2600 	spdk_delay_us(1000);
2601 	poll_threads();
2602 
2603 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2604 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2605 
2606 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2607 
2608 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2609 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2610 
2611 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2612 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2613 	CU_ASSERT(rc == 0);
2614 
2615 	spdk_delay_us(1000);
2616 	poll_threads();
2617 
2618 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2619 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2620 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2621 			break;
2622 		}
2623 	}
2624 	CU_ASSERT(ctrid != NULL);
2625 
2626 	/* If trid is not specified, nvme_ctrlr itself is removed. */
2627 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2628 	CU_ASSERT(rc == 0);
2629 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2630 
2631 	poll_threads();
2632 	spdk_delay_us(1000);
2633 	poll_threads();
2634 
2635 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2636 }
2637 
2638 static void
2639 test_abort(void)
2640 {
2641 	struct spdk_nvme_transport_id trid = {};
2642 	struct nvme_ctrlr_opts opts = {};
2643 	struct spdk_nvme_ctrlr *ctrlr;
2644 	struct nvme_ctrlr *nvme_ctrlr;
2645 	const int STRING_SIZE = 32;
2646 	const char *attached_names[STRING_SIZE];
2647 	struct nvme_bdev *bdev;
2648 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2649 	struct spdk_io_channel *ch1, *ch2;
2650 	struct nvme_bdev_channel *nbdev_ch1;
2651 	struct nvme_io_path *io_path1;
2652 	struct nvme_qpair *nvme_qpair1;
2653 	int rc;
2654 
2655 	/* Create a ctrlr on thread 1, and submit I/O and admin requests to be aborted
2656 	 * on thread 0. Abort requests for I/O are submitted on thread 0, and abort
2657 	 * requests for admin commands are submitted on thread 1. Both should succeed.
2658 	 */
2659 
2660 	ut_init_trid(&trid);
2661 
2662 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2663 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2664 
2665 	g_ut_attach_ctrlr_status = 0;
2666 	g_ut_attach_bdev_count = 1;
2667 
2668 	set_thread(1);
2669 
2670 	opts.ctrlr_loss_timeout_sec = -1;
2671 	opts.reconnect_delay_sec = 1;
2672 
2673 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2674 			      attach_ctrlr_done, NULL, NULL, &opts, false);
2675 	CU_ASSERT(rc == 0);
2676 
2677 	spdk_delay_us(1000);
2678 	poll_threads();
2679 
2680 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2681 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2682 
2683 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2684 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2685 
2686 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2687 	ut_bdev_io_set_buf(write_io);
2688 
2689 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2690 	ut_bdev_io_set_buf(fuse_io);
2691 
2692 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2693 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2694 
2695 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2696 
2697 	set_thread(0);
2698 
2699 	ch1 = spdk_get_io_channel(bdev);
2700 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2701 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2702 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2703 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2704 	nvme_qpair1 = io_path1->qpair;
2705 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2706 
2707 	set_thread(1);
2708 
2709 	ch2 = spdk_get_io_channel(bdev);
2710 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2711 
2712 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2713 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2714 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2715 
2716 	/* Aborting the already completed request should fail. */
2717 	write_io->internal.in_submit_request = true;
2718 	bdev_nvme_submit_request(ch1, write_io);
2719 	poll_threads();
2720 
2721 	CU_ASSERT(write_io->internal.in_submit_request == false);
2722 
2723 	abort_io->u.abort.bio_to_abort = write_io;
2724 	abort_io->internal.in_submit_request = true;
2725 
2726 	bdev_nvme_submit_request(ch1, abort_io);
2727 
2728 	poll_threads();
2729 
2730 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2731 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2732 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2733 
2734 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2735 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2736 
2737 	admin_io->internal.in_submit_request = true;
2738 	bdev_nvme_submit_request(ch1, admin_io);
2739 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2740 	poll_threads();
2741 
2742 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2743 
2744 	abort_io->u.abort.bio_to_abort = admin_io;
2745 	abort_io->internal.in_submit_request = true;
2746 
2747 	bdev_nvme_submit_request(ch2, abort_io);
2748 
2749 	poll_threads();
2750 
2751 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2752 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2753 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2754 
2755 	/* Aborting the write request should succeed. */
2756 	write_io->internal.in_submit_request = true;
2757 	bdev_nvme_submit_request(ch1, write_io);
2758 
2759 	CU_ASSERT(write_io->internal.in_submit_request == true);
2760 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2761 
2762 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2763 	abort_io->u.abort.bio_to_abort = write_io;
2764 	abort_io->internal.in_submit_request = true;
2765 
2766 	bdev_nvme_submit_request(ch1, abort_io);
2767 
2768 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2769 	poll_threads();
2770 
2771 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2772 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2773 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2774 	CU_ASSERT(write_io->internal.in_submit_request == false);
2775 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2776 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2777 
2778 	/* Aborting the fuse request should succeed. */
2779 	fuse_io->internal.in_submit_request = true;
2780 	bdev_nvme_submit_request(ch1, fuse_io);
2781 
2782 	CU_ASSERT(fuse_io->internal.in_submit_request == true);
2783 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2784 
2785 	abort_io->u.abort.bio_to_abort = fuse_io;
2786 	abort_io->internal.in_submit_request = true;
2787 
2788 	bdev_nvme_submit_request(ch1, abort_io);
2789 
2790 	spdk_delay_us(10000);
2791 	poll_threads();
2792 
2793 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2794 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2795 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2796 	CU_ASSERT(fuse_io->internal.in_submit_request == false);
2797 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2798 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2799 
2800 	/* Aborting the admin request should succeed. */
2801 	admin_io->internal.in_submit_request = true;
2802 	bdev_nvme_submit_request(ch1, admin_io);
2803 
2804 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2805 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2806 
2807 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2808 	abort_io->u.abort.bio_to_abort = admin_io;
2809 	abort_io->internal.in_submit_request = true;
2810 
2811 	bdev_nvme_submit_request(ch2, abort_io);
2812 
2813 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2814 	poll_threads();
2815 
2816 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2817 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2818 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2819 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2820 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2821 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2822 
2823 	set_thread(0);
2824 
2825 	/* If a qpair is disconnected, it is freed and then reconnected via resetting
2826 	 * the corresponding nvme_ctrlr. An I/O should be queued if it is submitted
2827 	 * while the nvme_ctrlr is resetting.
2828 	 */
2829 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2830 
2831 	poll_thread_times(0, 3);
2832 
2833 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2834 	CU_ASSERT(nvme_ctrlr->resetting == true);
2835 
2836 	write_io->internal.in_submit_request = true;
2837 
2838 	bdev_nvme_submit_request(ch1, write_io);
2839 
2840 	CU_ASSERT(write_io->internal.in_submit_request == true);
2841 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2842 
2843 	/* Aborting the queued write request should succeed immediately. */
2844 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2845 	abort_io->u.abort.bio_to_abort = write_io;
2846 	abort_io->internal.in_submit_request = true;
2847 
2848 	bdev_nvme_submit_request(ch1, abort_io);
2849 
2850 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2851 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2852 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2853 	CU_ASSERT(write_io->internal.in_submit_request == false);
2854 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2855 
2856 	poll_threads();
2857 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2858 	poll_threads();
2859 
2860 	spdk_put_io_channel(ch1);
2861 
2862 	set_thread(1);
2863 
2864 	spdk_put_io_channel(ch2);
2865 
2866 	poll_threads();
2867 
2868 	free(write_io);
2869 	free(fuse_io);
2870 	free(admin_io);
2871 	free(abort_io);
2872 
2873 	set_thread(1);
2874 
2875 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2876 	CU_ASSERT(rc == 0);
2877 
2878 	poll_threads();
2879 	spdk_delay_us(1000);
2880 	poll_threads();
2881 
2882 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2883 }
2884 
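/* Verify that bdev_nvme_get_io_qpair() returns the qpair owned by the nvme_ctrlr's
 * I/O channel.
 */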
2885 static void
2886 test_get_io_qpair(void)
2887 {
2888 	struct spdk_nvme_transport_id trid = {};
2889 	struct spdk_nvme_ctrlr ctrlr = {};
2890 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2891 	struct spdk_io_channel *ch;
2892 	struct nvme_ctrlr_channel *ctrlr_ch;
2893 	struct spdk_nvme_qpair *qpair;
2894 	int rc;
2895 
2896 	ut_init_trid(&trid);
2897 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2898 
2899 	set_thread(0);
2900 
2901 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2902 	CU_ASSERT(rc == 0);
2903 
2904 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2905 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2906 
2907 	ch = spdk_get_io_channel(nvme_ctrlr);
2908 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2909 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2910 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2911 
2912 	qpair = bdev_nvme_get_io_qpair(ch);
2913 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2914 
2915 	spdk_put_io_channel(ch);
2916 
2917 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2918 	CU_ASSERT(rc == 0);
2919 
2920 	poll_threads();
2921 	spdk_delay_us(1000);
2922 	poll_threads();
2923 
2924 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2925 }
2926 
2927 /* Test a scenario in which the bdev subsystem starts shutting down while NVMe
2928  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first.
2929  * Add a test case to avoid regression for this scenario. spdk_bdev_unregister()
2930  * calls bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly.
2931  */
2932 static void
2933 test_bdev_unregister(void)
2934 {
2935 	struct spdk_nvme_transport_id trid = {};
2936 	struct spdk_nvme_ctrlr *ctrlr;
2937 	struct nvme_ctrlr *nvme_ctrlr;
2938 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2939 	const int STRING_SIZE = 32;
2940 	const char *attached_names[STRING_SIZE];
2941 	struct nvme_bdev *bdev1, *bdev2;
2942 	int rc;
2943 
2944 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2945 	ut_init_trid(&trid);
2946 
2947 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2948 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2949 
2950 	g_ut_attach_ctrlr_status = 0;
2951 	g_ut_attach_bdev_count = 2;
2952 
2953 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2954 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2955 	CU_ASSERT(rc == 0);
2956 
2957 	spdk_delay_us(1000);
2958 	poll_threads();
2959 
2960 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2961 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2962 
2963 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2964 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2965 
2966 	bdev1 = nvme_ns1->bdev;
2967 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2968 
2969 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2970 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2971 
2972 	bdev2 = nvme_ns2->bdev;
2973 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2974 
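	/* Destruct both bdevs directly, as spdk_bdev_unregister() would do at shutdown. */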
2975 	bdev_nvme_destruct(&bdev1->disk);
2976 	bdev_nvme_destruct(&bdev2->disk);
2977 
2978 	poll_threads();
2979 
2980 	CU_ASSERT(nvme_ns1->bdev == NULL);
2981 	CU_ASSERT(nvme_ns2->bdev == NULL);
2982 
2983 	nvme_ctrlr->destruct = true;
2984 	_nvme_ctrlr_destruct(nvme_ctrlr);
2985 
2986 	poll_threads();
2987 	spdk_delay_us(1000);
2988 	poll_threads();
2989 
2990 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2991 }
2992 
2993 static void
2994 test_compare_ns(void)
2995 {
2996 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2997 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2998 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2999 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
3000 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
3001 
3002 	/* No IDs are defined. */
3003 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3004 
3005 	/* Only EUI64 are defined and not matched. */
3006 	nsdata1.eui64 = 0xABCDEF0123456789;
3007 	nsdata2.eui64 = 0xBBCDEF0123456789;
3008 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3009 
3010 	/* Only EUI64 are defined and matched. */
3011 	nsdata2.eui64 = 0xABCDEF0123456789;
3012 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3013 
3014 	/* Only NGUID are defined and not matched. */
3015 	nsdata1.eui64 = 0x0;
3016 	nsdata2.eui64 = 0x0;
3017 	nsdata1.nguid[0] = 0x12;
3018 	nsdata2.nguid[0] = 0x10;
3019 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3020 
3021 	/* Only NGUID are defined and matched. */
3022 	nsdata2.nguid[0] = 0x12;
3023 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3024 
3025 	/* Only UUID are defined and not matched. */
3026 	nsdata1.nguid[0] = 0x0;
3027 	nsdata2.nguid[0] = 0x0;
3028 	ns1.uuid = &uuid1;
3029 	ns2.uuid = &uuid2;
3030 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3031 
3032 	/* Only one UUID is defined. */
3033 	ns1.uuid = NULL;
3034 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3035 
3036 	/* Only UUID are defined and matched. */
3037 	ns1.uuid = &uuid2;
3038 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3039 
3040 	/* All EUI64, NGUID, and UUID are defined and matched. */
3041 	nsdata1.eui64 = 0x123456789ABCDEF;
3042 	nsdata2.eui64 = 0x123456789ABCDEF;
3043 	nsdata1.nguid[15] = 0x34;
3044 	nsdata2.nguid[15] = 0x34;
3045 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3046 
3047 	/* CSI are not matched. */
3048 	ns1.csi = SPDK_NVME_CSI_ZNS;
3049 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3050 }
3051 
3052 static void
3053 test_init_ana_log_page(void)
3054 {
3055 	struct spdk_nvme_transport_id trid = {};
3056 	struct spdk_nvme_ctrlr *ctrlr;
3057 	struct nvme_ctrlr *nvme_ctrlr;
3058 	const int STRING_SIZE = 32;
3059 	const char *attached_names[STRING_SIZE];
3060 	int rc;
3061 
3062 	set_thread(0);
3063 
3064 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3065 	ut_init_trid(&trid);
3066 
3067 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
3068 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3069 
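	/* Give each of the five namespaces a distinct ANA state, to be reflected when
	 * the ANA log page is read during attach.
	 */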
3070 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3071 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3072 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3073 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3074 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3075 
3076 	g_ut_attach_ctrlr_status = 0;
3077 	g_ut_attach_bdev_count = 5;
3078 
3079 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3080 			      attach_ctrlr_done, NULL, NULL, NULL, false);
3081 	CU_ASSERT(rc == 0);
3082 
3083 	spdk_delay_us(1000);
3084 	poll_threads();
3085 
3086 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3087 	poll_threads();
3088 
3089 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3090 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3091 
3092 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
3093 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
3094 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
3095 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
3096 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
3097 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3098 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3099 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3100 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
3101 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
3102 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
3103 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
3104 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
3105 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
3106 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
3107 
3108 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3109 	CU_ASSERT(rc == 0);
3110 
3111 	poll_threads();
3112 	spdk_delay_us(1000);
3113 	poll_threads();
3114 
3115 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3116 }
3117 
3118 static void
3119 init_accel(void)
3120 {
3121 	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3122 				sizeof(int), "accel_p");
3123 }
3124 
3125 static void
3126 fini_accel(void)
3127 {
3128 	spdk_io_device_unregister(g_accel_p, NULL);
3129 }
3130 
3131 static void
3132 test_get_memory_domains(void)
3133 {
3134 	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3135 	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3136 	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3137 	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3138 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3139 	struct spdk_memory_domain *domains[4] = {};
3140 	int rc = 0;
3141 
3142 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3143 
3144 	/* nvme controller doesn't have memory domains */
3145 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3146 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3147 	CU_ASSERT(rc == 0);
3148 	CU_ASSERT(domains[0] == NULL);
3149 	CU_ASSERT(domains[1] == NULL);
3150 
3151 	/* nvme controller has a memory domain */
3152 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3153 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3154 	CU_ASSERT(rc == 1);
3155 	CU_ASSERT(domains[0] != NULL);
3156 	memset(domains, 0, sizeof(domains));
3157 
3158 	/* multipath, 2 controllers report 1 memory domain each */
3159 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3160 
3161 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3162 	CU_ASSERT(rc == 2);
3163 	CU_ASSERT(domains[0] != NULL);
3164 	CU_ASSERT(domains[1] != NULL);
3165 	memset(domains, 0, sizeof(domains));
3166 
3167 	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3168 	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3169 	CU_ASSERT(rc == 2);
3170 
3171 	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3172 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3173 	CU_ASSERT(rc == 2);
3174 	CU_ASSERT(domains[0] == NULL);
3175 	CU_ASSERT(domains[1] == NULL);
3176 
3177 	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3178 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3179 	CU_ASSERT(rc == 2);
3180 	CU_ASSERT(domains[0] != NULL);
3181 	CU_ASSERT(domains[1] == NULL);
3182 	memset(domains, 0, sizeof(domains));
3183 
3184 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3185 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3186 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3187 	CU_ASSERT(rc == 4);
3188 	CU_ASSERT(domains[0] != NULL);
3189 	CU_ASSERT(domains[1] != NULL);
3190 	CU_ASSERT(domains[2] != NULL);
3191 	CU_ASSERT(domains[3] != NULL);
3192 	memset(domains, 0, sizeof(domains));
3193 
3194 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test).
3195 	 * Array size is less than the number of memory domains. */
3196 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3197 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3198 	CU_ASSERT(rc == 4);
3199 	CU_ASSERT(domains[0] != NULL);
3200 	CU_ASSERT(domains[1] != NULL);
3201 	CU_ASSERT(domains[2] != NULL);
3202 	CU_ASSERT(domains[3] == NULL);
3203 	memset(domains, 0, sizeof(domains));
3204 
3205 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3206 }
3207 
3208 static void
3209 test_reconnect_qpair(void)
3210 {
3211 	struct spdk_nvme_transport_id trid = {};
3212 	struct spdk_nvme_ctrlr *ctrlr;
3213 	struct nvme_ctrlr *nvme_ctrlr;
3214 	const int STRING_SIZE = 32;
3215 	const char *attached_names[STRING_SIZE];
3216 	struct nvme_bdev *bdev;
3217 	struct spdk_io_channel *ch1, *ch2;
3218 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3219 	struct nvme_io_path *io_path1, *io_path2;
3220 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3221 	int rc;
3222 
3223 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3224 	ut_init_trid(&trid);
3225 
3226 	set_thread(0);
3227 
3228 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3229 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3230 
3231 	g_ut_attach_ctrlr_status = 0;
3232 	g_ut_attach_bdev_count = 1;
3233 
3234 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3235 			      attach_ctrlr_done, NULL, NULL, NULL, false);
3236 	CU_ASSERT(rc == 0);
3237 
3238 	spdk_delay_us(1000);
3239 	poll_threads();
3240 
3241 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3242 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3243 
3244 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3245 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3246 
3247 	ch1 = spdk_get_io_channel(bdev);
3248 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3249 
3250 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3251 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3252 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3253 	nvme_qpair1 = io_path1->qpair;
3254 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3255 
3256 	set_thread(1);
3257 
3258 	ch2 = spdk_get_io_channel(bdev);
3259 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3260 
3261 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3262 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3263 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3264 	nvme_qpair2 = io_path2->qpair;
3265 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3266 
3267 	/* If a qpair is disconnected, it is freed and then reconnected via
3268 	 * resetting the corresponding nvme_ctrlr.
3269 	 */
3270 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3271 	ctrlr->is_failed = true;
3272 
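	/* Step the pollers: thread 1 detects the failed qpair and frees it, which
	 * starts a reset of the nvme_ctrlr.
	 */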
3273 	poll_thread_times(1, 3);
3274 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3275 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3276 	CU_ASSERT(nvme_ctrlr->resetting == true);
3277 
3278 	poll_thread_times(0, 3);
3279 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3280 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3281 	CU_ASSERT(ctrlr->is_failed == true);
3282 
3283 	poll_thread_times(1, 2);
3284 	poll_thread_times(0, 1);
3285 	CU_ASSERT(ctrlr->is_failed == false);
3286 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3287 
3288 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3289 	poll_thread_times(0, 2);
3290 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3291 
3292 	poll_thread_times(0, 1);
3293 	poll_thread_times(1, 1);
3294 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3295 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3296 	CU_ASSERT(nvme_ctrlr->resetting == true);
3297 
3298 	poll_thread_times(0, 2);
3299 	poll_thread_times(1, 1);
3300 	poll_thread_times(0, 1);
3301 	CU_ASSERT(nvme_ctrlr->resetting == false);
3302 
3303 	poll_threads();
3304 
3305 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3306 	 * fails, the qpair is just freed.
3307 	 */
3308 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3309 	ctrlr->is_failed = true;
3310 	ctrlr->fail_reset = true;
3311 
3312 	poll_thread_times(1, 3);
3313 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3314 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3315 	CU_ASSERT(nvme_ctrlr->resetting == true);
3316 
3317 	poll_thread_times(0, 3);
3318 	poll_thread_times(1, 1);
3319 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3320 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3321 	CU_ASSERT(ctrlr->is_failed == true);
3322 
3323 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3324 	poll_thread_times(0, 3);
3325 	poll_thread_times(1, 1);
3326 	poll_thread_times(0, 1);
3327 	CU_ASSERT(ctrlr->is_failed == true);
3328 	CU_ASSERT(nvme_ctrlr->resetting == false);
3329 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3330 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3331 
3332 	poll_threads();
3333 
3334 	spdk_put_io_channel(ch2);
3335 
3336 	set_thread(0);
3337 
3338 	spdk_put_io_channel(ch1);
3339 
3340 	poll_threads();
3341 
3342 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3343 	CU_ASSERT(rc == 0);
3344 
3345 	poll_threads();
3346 	spdk_delay_us(1000);
3347 	poll_threads();
3348 
3349 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3350 }
3351 
3352 static void
3353 test_create_bdev_ctrlr(void)
3354 {
3355 	struct nvme_path_id path1 = {}, path2 = {};
3356 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3357 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3358 	const int STRING_SIZE = 32;
3359 	const char *attached_names[STRING_SIZE];
3360 	int rc;
3361 
3362 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3363 	ut_init_trid(&path1.trid);
3364 	ut_init_trid2(&path2.trid);
3365 
3366 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3367 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3368 
3369 	g_ut_attach_ctrlr_status = 0;
3370 	g_ut_attach_bdev_count = 0;
3371 
3372 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3373 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3374 	CU_ASSERT(rc == 0);
3375 
3376 	spdk_delay_us(1000);
3377 	poll_threads();
3378 
3379 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3380 	poll_threads();
3381 
3382 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3383 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3384 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3385 
3386 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3387 	g_ut_attach_ctrlr_status = -EINVAL;
3388 
3389 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3390 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3391 
3392 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3393 
3394 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3395 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3396 	CU_ASSERT(rc == 0);
3397 
3398 	spdk_delay_us(1000);
3399 	poll_threads();
3400 
3401 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3402 	poll_threads();
3403 
3404 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3405 
3406 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3407 	g_ut_attach_ctrlr_status = 0;
3408 
3409 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3410 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3411 
3412 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3413 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3414 	CU_ASSERT(rc == 0);
3415 
3416 	spdk_delay_us(1000);
3417 	poll_threads();
3418 
3419 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3420 	poll_threads();
3421 
3422 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3423 
3424 	/* Delete two ctrlrs at once. */
3425 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3426 	CU_ASSERT(rc == 0);
3427 
3428 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3429 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3430 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3431 
3432 	poll_threads();
3433 	spdk_delay_us(1000);
3434 	poll_threads();
3435 
3436 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3437 
3438 	/* Add two ctrlrs and delete one by one. */
3439 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3440 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3441 
3442 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3443 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3444 
3445 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3446 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3447 	CU_ASSERT(rc == 0);
3448 
3449 	spdk_delay_us(1000);
3450 	poll_threads();
3451 
3452 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3453 	poll_threads();
3454 
3455 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3456 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3457 	CU_ASSERT(rc == 0);
3458 
3459 	spdk_delay_us(1000);
3460 	poll_threads();
3461 
3462 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3463 	poll_threads();
3464 
3465 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3466 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3467 
3468 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3469 	CU_ASSERT(rc == 0);
3470 
3471 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3472 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3473 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3474 
3475 	poll_threads();
3476 	spdk_delay_us(1000);
3477 	poll_threads();
3478 
3479 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3480 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3481 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3482 
3483 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3484 	CU_ASSERT(rc == 0);
3485 
3486 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3487 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3488 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3489 
3490 	poll_threads();
3491 	spdk_delay_us(1000);
3492 	poll_threads();
3493 
3494 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3495 }
3496 
3497 static struct nvme_ns *
3498 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3499 {
3500 	struct nvme_ns *nvme_ns;
3501 
3502 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3503 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3504 			return nvme_ns;
3505 		}
3506 	}
3507 
3508 	return NULL;
3509 }
3510 
3511 static void
3512 test_add_multi_ns_to_bdev(void)
3513 {
3514 	struct nvme_path_id path1 = {}, path2 = {};
3515 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3516 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3517 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3518 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3519 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3520 	const int STRING_SIZE = 32;
3521 	const char *attached_names[STRING_SIZE];
3522 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3523 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3524 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3525 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3526 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3527 	int rc;
3528 
3529 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3530 	ut_init_trid(&path1.trid);
3531 	ut_init_trid2(&path2.trid);
3532 
3533 	/* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */
3534 
	/* Attach the 1st ctrlr, whose maximum namespace count is 5 and whose 1st, 3rd,
	 * and 4th namespaces are populated.
	 */
3538 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3539 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3540 
3541 	ctrlr1->ns[1].is_active = false;
3542 	ctrlr1->ns[4].is_active = false;
3543 	ctrlr1->ns[0].uuid = &uuid1;
3544 	ctrlr1->ns[2].uuid = &uuid3;
3545 	ctrlr1->ns[3].uuid = &uuid4;
3546 
3547 	g_ut_attach_ctrlr_status = 0;
3548 	g_ut_attach_bdev_count = 3;
3549 
	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3551 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3552 	CU_ASSERT(rc == 0);
3553 
3554 	spdk_delay_us(1000);
3555 	poll_threads();
3556 
3557 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3558 	poll_threads();
3559 
	/* Attach the 2nd ctrlr, whose maximum namespace count is 5 and whose 1st, 2nd,
	 * and 4th namespaces are populated. The UUID of the 4th namespace differs,
	 * so adding the 4th namespace to a bdev should fail.
	 */
3564 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3565 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3566 
3567 	ctrlr2->ns[2].is_active = false;
3568 	ctrlr2->ns[4].is_active = false;
3569 	ctrlr2->ns[0].uuid = &uuid1;
3570 	ctrlr2->ns[1].uuid = &uuid2;
3571 	ctrlr2->ns[3].uuid = &uuid44;
3572 
3573 	g_ut_attach_ctrlr_status = 0;
3574 	g_ut_attach_bdev_count = 2;
3575 
	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3577 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3578 	CU_ASSERT(rc == 0);
3579 
3580 	spdk_delay_us(1000);
3581 	poll_threads();
3582 
3583 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3584 	poll_threads();
3585 
3586 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3587 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3588 
3589 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3590 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3591 
3592 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3593 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3594 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3595 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3596 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3597 
3598 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3599 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3600 
3601 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3602 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3603 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3604 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3605 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3606 
3607 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3608 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3609 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3610 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3611 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3612 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3613 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3614 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3615 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3616 
3617 	CU_ASSERT(bdev1->ref == 2);
3618 	CU_ASSERT(bdev2->ref == 1);
3619 	CU_ASSERT(bdev3->ref == 1);
3620 	CU_ASSERT(bdev4->ref == 1);
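	/* bdev1 is shared by both ctrlrs via uuid1 and hence holds two references;
	 * each of the other bdevs has a single path.
	 */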
3621 
	/* Test that the nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3623 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3624 	CU_ASSERT(rc == 0);
3625 
3626 	poll_threads();
3627 	spdk_delay_us(1000);
3628 	poll_threads();
3629 
3630 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3631 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3632 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3633 
3634 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3635 	CU_ASSERT(rc == 0);
3636 
3637 	poll_threads();
3638 	spdk_delay_us(1000);
3639 	poll_threads();
3640 
3641 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3642 
	/* Test that an nvme_bdev whose namespace is shared between two ctrlrs can be
	 * deleted when the bdev subsystem shuts down.
	 */
3646 	g_ut_attach_bdev_count = 1;
3647 
3648 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3649 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3650 
3651 	ctrlr1->ns[0].uuid = &uuid1;
3652 
	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3654 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3655 	CU_ASSERT(rc == 0);
3656 
3657 	spdk_delay_us(1000);
3658 	poll_threads();
3659 
3660 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3661 	poll_threads();
3662 
3663 	ut_init_trid2(&path2.trid);
3664 
3665 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3666 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3667 
3668 	ctrlr2->ns[0].uuid = &uuid1;
3669 
	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3671 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3672 	CU_ASSERT(rc == 0);
3673 
3674 	spdk_delay_us(1000);
3675 	poll_threads();
3676 
3677 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3678 	poll_threads();
3679 
3680 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3681 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3682 
3683 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3684 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3685 
3686 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3687 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3688 
3689 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3690 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3691 
	/* Check that the nvme_bdev has two nvme_ns, one from each ctrlr. */
3693 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3694 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3695 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3696 
3697 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3698 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3699 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3700 
	/* When the bdev subsystem shuts down, the nvme_bdev is deleted first. */
3702 	bdev_nvme_destruct(&bdev1->disk);
3703 
3704 	poll_threads();
3705 
3706 	CU_ASSERT(nvme_ns1->bdev == NULL);
3707 	CU_ASSERT(nvme_ns2->bdev == NULL);
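	/* Destructing the bdev detached it from both namespaces while the ctrlrs
	 * are still alive; tear the ctrlrs down next.
	 */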
3708 
3709 	nvme_ctrlr1->destruct = true;
3710 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3711 
3712 	poll_threads();
3713 	spdk_delay_us(1000);
3714 	poll_threads();
3715 
3716 	nvme_ctrlr2->destruct = true;
3717 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3718 
3719 	poll_threads();
3720 	spdk_delay_us(1000);
3721 	poll_threads();
3722 
3723 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3724 }
3725 
3726 static void
3727 test_add_multi_io_paths_to_nbdev_ch(void)
3728 {
3729 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3730 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3731 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3732 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3733 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3734 	const int STRING_SIZE = 32;
3735 	const char *attached_names[STRING_SIZE];
3736 	struct nvme_bdev *bdev;
3737 	struct spdk_io_channel *ch;
3738 	struct nvme_bdev_channel *nbdev_ch;
3739 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3740 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3741 	int rc;
3742 
3743 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3744 	ut_init_trid(&path1.trid);
3745 	ut_init_trid2(&path2.trid);
3746 	ut_init_trid3(&path3.trid);
3747 	g_ut_attach_ctrlr_status = 0;
3748 	g_ut_attach_bdev_count = 1;
3749 
3750 	set_thread(1);
3751 
3752 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3753 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3754 
3755 	ctrlr1->ns[0].uuid = &uuid1;
3756 
3757 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3758 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3759 	CU_ASSERT(rc == 0);
3760 
3761 	spdk_delay_us(1000);
3762 	poll_threads();
3763 
3764 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3765 	poll_threads();
3766 
3767 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3768 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3769 
3770 	ctrlr2->ns[0].uuid = &uuid1;
3771 
3772 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3773 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3774 	CU_ASSERT(rc == 0);
3775 
3776 	spdk_delay_us(1000);
3777 	poll_threads();
3778 
3779 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3780 	poll_threads();
3781 
3782 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3783 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3784 
3785 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3786 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3787 
3788 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3789 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3790 
3791 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3792 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3793 
3794 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3795 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3796 
3797 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3798 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3799 
3800 	set_thread(0);
3801 
3802 	ch = spdk_get_io_channel(bdev);
3803 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3804 	nbdev_ch = spdk_io_channel_get_ctx(ch);
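	/* The nvme_bdev_channel should hold one nvme_io_path per attached ctrlr
	 * that provides a namespace for this bdev.
	 */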
3805 
3806 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3807 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3808 
3809 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3810 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3811 
3812 	set_thread(1);
3813 
	/* Check that an I/O path is dynamically added to the nvme_bdev_channel. */
3815 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3816 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3817 
3818 	ctrlr3->ns[0].uuid = &uuid1;
3819 
3820 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3821 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3822 	CU_ASSERT(rc == 0);
3823 
3824 	spdk_delay_us(1000);
3825 	poll_threads();
3826 
3827 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3828 	poll_threads();
3829 
3830 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3831 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3832 
3833 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3834 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3835 
3836 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3837 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3838 
	/* Check that an I/O path is dynamically deleted from the nvme_bdev_channel. */
3840 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3841 	CU_ASSERT(rc == 0);
3842 
3843 	poll_threads();
3844 	spdk_delay_us(1000);
3845 	poll_threads();
3846 
3847 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3848 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3849 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3850 
3851 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3852 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3853 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3854 
3855 	set_thread(0);
3856 
3857 	spdk_put_io_channel(ch);
3858 
3859 	poll_threads();
3860 
3861 	set_thread(1);
3862 
3863 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3864 	CU_ASSERT(rc == 0);
3865 
3866 	poll_threads();
3867 	spdk_delay_us(1000);
3868 	poll_threads();
3869 
3870 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3871 }
3872 
3873 static void
3874 test_admin_path(void)
3875 {
3876 	struct nvme_path_id path1 = {}, path2 = {};
3877 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3878 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3879 	const int STRING_SIZE = 32;
3880 	const char *attached_names[STRING_SIZE];
3881 	struct nvme_bdev *bdev;
3882 	struct spdk_io_channel *ch;
3883 	struct spdk_bdev_io *bdev_io;
3884 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3885 	int rc;
3886 
3887 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3888 	ut_init_trid(&path1.trid);
3889 	ut_init_trid2(&path2.trid);
3890 	g_ut_attach_ctrlr_status = 0;
3891 	g_ut_attach_bdev_count = 1;
3892 
3893 	set_thread(0);
3894 
3895 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3896 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3897 
3898 	ctrlr1->ns[0].uuid = &uuid1;
3899 
3900 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3901 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3902 	CU_ASSERT(rc == 0);
3903 
3904 	spdk_delay_us(1000);
3905 	poll_threads();
3906 
3907 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3908 	poll_threads();
3909 
3910 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3911 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3912 
3913 	ctrlr2->ns[0].uuid = &uuid1;
3914 
3915 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3916 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3917 	CU_ASSERT(rc == 0);
3918 
3919 	spdk_delay_us(1000);
3920 	poll_threads();
3921 
3922 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3923 	poll_threads();
3924 
3925 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3926 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3927 
3928 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3929 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3930 
3931 	ch = spdk_get_io_channel(bdev);
3932 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3933 
3934 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3935 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3936 
	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
	 * submitted to ctrlr2.
	 */
3940 	ctrlr1->is_failed = true;
3941 	bdev_io->internal.in_submit_request = true;
3942 
3943 	bdev_nvme_submit_request(ch, bdev_io);
3944 
3945 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3946 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3947 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3948 
3949 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3950 	poll_threads();
3951 
3952 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3953 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3954 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3955 
	/* Both ctrlr1 and ctrlr2 are failed, so submission of the admin command should fail. */
3957 	ctrlr2->is_failed = true;
3958 	bdev_io->internal.in_submit_request = true;
3959 
3960 	bdev_nvme_submit_request(ch, bdev_io);
3961 
3962 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3963 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3964 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3965 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3966 
3967 	free(bdev_io);
3968 
3969 	spdk_put_io_channel(ch);
3970 
3971 	poll_threads();
3972 
3973 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3974 	CU_ASSERT(rc == 0);
3975 
3976 	poll_threads();
3977 	spdk_delay_us(1000);
3978 	poll_threads();
3979 
3980 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3981 }
3982 
3983 static struct nvme_io_path *
3984 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3985 			struct nvme_ctrlr *nvme_ctrlr)
3986 {
3987 	struct nvme_io_path *io_path;
3988 
3989 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3990 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
3991 			return io_path;
3992 		}
3993 	}
3994 
3995 	return NULL;
3996 }
3997 
3998 static void
3999 test_reset_bdev_ctrlr(void)
4000 {
4001 	struct nvme_path_id path1 = {}, path2 = {};
4002 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4003 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4004 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4005 	struct nvme_path_id *curr_path1, *curr_path2;
4006 	const int STRING_SIZE = 32;
4007 	const char *attached_names[STRING_SIZE];
4008 	struct nvme_bdev *bdev;
4009 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
4010 	struct nvme_bdev_io *first_bio;
4011 	struct spdk_io_channel *ch1, *ch2;
4012 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
4013 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
4014 	int rc;
4015 
4016 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4017 	ut_init_trid(&path1.trid);
4018 	ut_init_trid2(&path2.trid);
4019 	g_ut_attach_ctrlr_status = 0;
4020 	g_ut_attach_bdev_count = 1;
4021 
4022 	set_thread(0);
4023 
4024 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4025 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4026 
4027 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4028 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4029 	CU_ASSERT(rc == 0);
4030 
4031 	spdk_delay_us(1000);
4032 	poll_threads();
4033 
4034 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4035 	poll_threads();
4036 
4037 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4038 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4039 
4040 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4041 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4042 	CU_ASSERT(rc == 0);
4043 
4044 	spdk_delay_us(1000);
4045 	poll_threads();
4046 
4047 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4048 	poll_threads();
4049 
4050 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4051 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4052 
4053 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4054 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
4055 
4056 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
4057 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
4058 
4059 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4060 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
4061 
4062 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
4063 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
4064 
4065 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4066 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4067 
4068 	set_thread(0);
4069 
4070 	ch1 = spdk_get_io_channel(bdev);
4071 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
4072 
4073 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
4074 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
4075 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
4076 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
4077 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
4078 
4079 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
4080 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
4081 
4082 	set_thread(1);
4083 
4084 	ch2 = spdk_get_io_channel(bdev);
4085 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
4086 
4087 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
4088 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
4089 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
4090 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
4091 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
4092 
4093 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
4094 
	/* The first reset request from bdev_io is submitted on thread 0.
	 * Check that ctrlr1 is reset first and then ctrlr2.
	 *
	 * A few extra polls are necessary after resetting ctrlr1 to flush
	 * the pending reset requests for ctrlr1.
	 */
4101 	ctrlr1->is_failed = true;
4102 	curr_path1->last_failed_tsc = spdk_get_ticks();
4103 	ctrlr2->is_failed = true;
4104 	curr_path2->last_failed_tsc = spdk_get_ticks();
4105 
4106 	set_thread(0);
4107 
4108 	bdev_nvme_submit_request(ch1, first_bdev_io);
4109 	CU_ASSERT(first_bio->io_path == io_path11);
4110 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4111 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4112 
4113 	poll_thread_times(0, 3);
4114 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4115 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4116 
4117 	poll_thread_times(1, 2);
4118 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4119 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4120 	CU_ASSERT(ctrlr1->is_failed == true);
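	/* Both channels' I/O qpairs are deleted before the ctrlr itself is
	 * disconnected and reconnected below.
	 */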
4121 
4122 	poll_thread_times(0, 1);
4123 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4124 	CU_ASSERT(ctrlr1->is_failed == false);
4125 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4126 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4127 
4128 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4129 	poll_thread_times(0, 2);
4130 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4131 
4132 	poll_thread_times(0, 1);
4133 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4134 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4135 
4136 	poll_thread_times(1, 1);
4137 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4138 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4139 
4140 	poll_thread_times(0, 2);
4141 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4142 	poll_thread_times(1, 1);
4143 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4144 	poll_thread_times(0, 2);
4145 	CU_ASSERT(nvme_ctrlr1->resetting == false);
4146 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4147 	CU_ASSERT(first_bio->io_path == io_path12);
4148 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4149 
4150 	poll_thread_times(0, 3);
4151 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4152 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4153 
4154 	poll_thread_times(1, 2);
4155 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4156 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4157 	CU_ASSERT(ctrlr2->is_failed == true);
4158 
4159 	poll_thread_times(0, 1);
4160 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4161 	CU_ASSERT(ctrlr2->is_failed == false);
4162 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4163 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4164 
4165 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4166 	poll_thread_times(0, 2);
4167 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4168 
4169 	poll_thread_times(0, 1);
4170 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4171 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4172 
4173 	poll_thread_times(1, 2);
4174 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4175 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4176 
4177 	poll_thread_times(0, 2);
4178 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4179 	poll_thread_times(1, 1);
4180 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4181 	poll_thread_times(0, 2);
4182 	CU_ASSERT(first_bio->io_path == NULL);
4183 	CU_ASSERT(nvme_ctrlr2->resetting == false);
4184 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4185 
4186 	poll_threads();
4187 
	/* There is a race between two reset requests from bdev_io.
	 *
	 * The first reset request is submitted on thread 0, and the second reset
	 * request is submitted on thread 1 while the first is resetting ctrlr1.
	 * The second is queued as pending on ctrlr1. After the first completes
	 * resetting ctrlr1, both reset requests go to ctrlr2. The first gets there
	 * earlier, so the second is queued as pending on ctrlr2 again. After the
	 * first completes resetting ctrlr2, both complete successfully.
	 */
4197 	ctrlr1->is_failed = true;
4198 	curr_path1->last_failed_tsc = spdk_get_ticks();
4199 	ctrlr2->is_failed = true;
4200 	curr_path2->last_failed_tsc = spdk_get_ticks();
4201 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4202 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4203 
4204 	set_thread(0);
4205 
4206 	bdev_nvme_submit_request(ch1, first_bdev_io);
4207 
4208 	set_thread(1);
4209 
4210 	bdev_nvme_submit_request(ch2, second_bdev_io);
4211 
4212 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4213 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4214 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);
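	/* The second reset stays queued on ctrlr1's pending_resets list until the
	 * first reset completes.
	 */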
4215 
4216 	poll_threads();
4217 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4218 	poll_threads();
4219 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4220 	poll_threads();
4221 
4222 	CU_ASSERT(ctrlr1->is_failed == false);
4223 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4224 	CU_ASSERT(ctrlr2->is_failed == false);
4225 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4226 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4227 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4228 
4229 	set_thread(0);
4230 
4231 	spdk_put_io_channel(ch1);
4232 
4233 	set_thread(1);
4234 
4235 	spdk_put_io_channel(ch2);
4236 
4237 	poll_threads();
4238 
4239 	set_thread(0);
4240 
4241 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4242 	CU_ASSERT(rc == 0);
4243 
4244 	poll_threads();
4245 	spdk_delay_us(1000);
4246 	poll_threads();
4247 
4248 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4249 
4250 	free(first_bdev_io);
4251 	free(second_bdev_io);
4252 }
4253 
4254 static void
4255 test_find_io_path(void)
4256 {
4257 	struct nvme_bdev_channel nbdev_ch = {
4258 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4259 	};
4260 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4261 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4262 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4263 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4264 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4265 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4266 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
4267 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4268 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4269 
4270 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4271 
	/* Test that an io_path whose ANA state is not accessible is excluded. */
4273 
4274 	nvme_qpair1.qpair = &qpair1;
4275 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4276 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4277 
4278 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4279 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4280 
4281 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4282 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4283 
4284 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4285 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
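	/* bdev_nvme_find_io_path() caches its pick in nbdev_ch.current_io_path,
	 * so the cache is cleared between the cases below.
	 */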
4286 
4287 	nbdev_ch.current_io_path = NULL;
4288 
4289 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4290 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4291 
4292 	nbdev_ch.current_io_path = NULL;
4293 
	/* Test that an io_path whose qpair is resetting is excluded. */
4295 
4296 	nvme_qpair1.qpair = NULL;
4297 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4298 
4299 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4300 
	/* Test that an ANA optimized path is preferred and, failing that,
	 * the first ANA non-optimized path found is used.
	 */
4304 
4305 	nvme_qpair1.qpair = &qpair1;
4306 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4307 	nvme_qpair2.qpair = &qpair2;
4308 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4309 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4310 
4311 	nbdev_ch.current_io_path = NULL;
4312 
4313 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4314 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4315 
4316 	nbdev_ch.current_io_path = NULL;
4317 }
4318 
4319 static void
4320 test_retry_io_if_ana_state_is_updating(void)
4321 {
4322 	struct nvme_path_id path = {};
4323 	struct nvme_ctrlr_opts opts = {};
4324 	struct spdk_nvme_ctrlr *ctrlr;
4325 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4326 	struct nvme_ctrlr *nvme_ctrlr;
4327 	const int STRING_SIZE = 32;
4328 	const char *attached_names[STRING_SIZE];
4329 	struct nvme_bdev *bdev;
4330 	struct nvme_ns *nvme_ns;
4331 	struct spdk_bdev_io *bdev_io1;
4332 	struct spdk_io_channel *ch;
4333 	struct nvme_bdev_channel *nbdev_ch;
4334 	struct nvme_io_path *io_path;
4335 	struct nvme_qpair *nvme_qpair;
4336 	int rc;
4337 
4338 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4339 	ut_init_trid(&path.trid);
4340 
4341 	set_thread(0);
4342 
4343 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4344 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4345 
4346 	g_ut_attach_ctrlr_status = 0;
4347 	g_ut_attach_bdev_count = 1;
4348 
4349 	opts.ctrlr_loss_timeout_sec = -1;
4350 	opts.reconnect_delay_sec = 1;
4351 
4352 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4353 			      attach_ctrlr_done, NULL, NULL, &opts, false);
4354 	CU_ASSERT(rc == 0);
4355 
4356 	spdk_delay_us(1000);
4357 	poll_threads();
4358 
4359 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4360 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4361 
4362 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4363 	CU_ASSERT(nvme_ctrlr != NULL);
4364 
4365 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4366 	CU_ASSERT(bdev != NULL);
4367 
4368 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4369 	CU_ASSERT(nvme_ns != NULL);
4370 
4371 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4372 	ut_bdev_io_set_buf(bdev_io1);
4373 
4374 	ch = spdk_get_io_channel(bdev);
4375 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4376 
4377 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4378 
4379 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4380 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4381 
4382 	nvme_qpair = io_path->qpair;
4383 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4384 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4385 
4386 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4387 
4388 	/* If qpair is connected, I/O should succeed. */
4389 	bdev_io1->internal.in_submit_request = true;
4390 
4391 	bdev_nvme_submit_request(ch, bdev_io1);
4392 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4393 
4394 	poll_threads();
4395 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4397 
	/* If the namespace's ANA state is inaccessible, the I/O should be queued. */
4399 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4400 	nbdev_ch->current_io_path = NULL;
4401 
4402 	bdev_io1->internal.in_submit_request = true;
4403 
4404 	bdev_nvme_submit_request(ch, bdev_io1);
4405 
4406 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4407 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4408 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4409 
4410 	/* ANA state became accessible while I/O was queued. */
4411 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4412 
4413 	spdk_delay_us(1000000);
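	/* Advance the clock by one second, the delay after which a queued I/O
	 * with no usable path is retried.
	 */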
4414 
4415 	poll_thread_times(0, 1);
4416 
4417 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4418 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4419 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4420 
4421 	poll_threads();
4422 
4423 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4424 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4425 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4426 
4427 	free(bdev_io1);
4428 
4429 	spdk_put_io_channel(ch);
4430 
4431 	poll_threads();
4432 
4433 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4434 	CU_ASSERT(rc == 0);
4435 
4436 	poll_threads();
4437 	spdk_delay_us(1000);
4438 	poll_threads();
4439 
4440 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4441 }
4442 
4443 static void
4444 test_retry_io_for_io_path_error(void)
4445 {
4446 	struct nvme_path_id path1 = {}, path2 = {};
4447 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4448 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4449 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4450 	const int STRING_SIZE = 32;
4451 	const char *attached_names[STRING_SIZE];
4452 	struct nvme_bdev *bdev;
4453 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4454 	struct spdk_bdev_io *bdev_io;
4455 	struct nvme_bdev_io *bio;
4456 	struct spdk_io_channel *ch;
4457 	struct nvme_bdev_channel *nbdev_ch;
4458 	struct nvme_io_path *io_path1, *io_path2;
4459 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4460 	struct ut_nvme_req *req;
4461 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4462 	int rc;
4463 
4464 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4465 	ut_init_trid(&path1.trid);
4466 	ut_init_trid2(&path2.trid);
4467 
4468 	g_opts.bdev_retry_count = 1;
4469 
4470 	set_thread(0);
4471 
4472 	g_ut_attach_ctrlr_status = 0;
4473 	g_ut_attach_bdev_count = 1;
4474 
4475 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4476 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4477 
4478 	ctrlr1->ns[0].uuid = &uuid1;
4479 
4480 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4481 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4482 	CU_ASSERT(rc == 0);
4483 
4484 	spdk_delay_us(1000);
4485 	poll_threads();
4486 
4487 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4488 	poll_threads();
4489 
4490 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4491 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4492 
4493 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4494 	CU_ASSERT(nvme_ctrlr1 != NULL);
4495 
4496 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4497 	CU_ASSERT(bdev != NULL);
4498 
4499 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4500 	CU_ASSERT(nvme_ns1 != NULL);
4501 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4502 
4503 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4504 	ut_bdev_io_set_buf(bdev_io);
4505 
4506 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4507 
4508 	ch = spdk_get_io_channel(bdev);
4509 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4510 
4511 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4512 
4513 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4514 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4515 
4516 	nvme_qpair1 = io_path1->qpair;
4517 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4518 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4519 
4520 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4521 
	/* The I/O gets a temporary I/O path error, but it should not be retried if DNR is set. */
4523 	bdev_io->internal.in_submit_request = true;
4524 
4525 	bdev_nvme_submit_request(ch, bdev_io);
4526 
4527 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4528 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4529 
4530 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4531 	SPDK_CU_ASSERT_FATAL(req != NULL);
4532 
4533 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4534 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4535 	req->cpl.status.dnr = 1;
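	/* DNR (do not retry) in the completion forbids a retry even though
	 * g_opts.bdev_retry_count allows one.
	 */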
4536 
4537 	poll_thread_times(0, 1);
4538 
4539 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4540 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4541 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4542 
	/* The I/O gets a temporary I/O path error but should succeed after a retry. */
4544 	bdev_io->internal.in_submit_request = true;
4545 
4546 	bdev_nvme_submit_request(ch, bdev_io);
4547 
4548 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4549 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4550 
4551 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4552 	SPDK_CU_ASSERT_FATAL(req != NULL);
4553 
4554 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4555 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4556 
4557 	poll_thread_times(0, 1);
4558 
4559 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4560 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4561 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4562 
4563 	poll_threads();
4564 
4565 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4566 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4567 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4568 
4569 	/* Add io_path2 dynamically, and create a multipath configuration. */
4570 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4571 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4572 
4573 	ctrlr2->ns[0].uuid = &uuid1;
4574 
4575 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4576 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4577 	CU_ASSERT(rc == 0);
4578 
4579 	spdk_delay_us(1000);
4580 	poll_threads();
4581 
4582 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4583 	poll_threads();
4584 
4585 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4586 	CU_ASSERT(nvme_ctrlr2 != NULL);
4587 
4588 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4589 	CU_ASSERT(nvme_ns2 != NULL);
4590 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4591 
4592 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4593 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4594 
4595 	nvme_qpair2 = io_path2->qpair;
4596 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4597 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4598 
	/* The I/O is submitted to io_path1, but io_path1's qpair is disconnected
	 * and deleted, so the I/O is aborted. io_path2 is still available, so
	 * after a retry the I/O is submitted to io_path2 and should succeed.
	 */
4603 	bdev_io->internal.in_submit_request = true;
4604 
4605 	bdev_nvme_submit_request(ch, bdev_io);
4606 
4607 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4608 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4609 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4610 
4611 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4612 	SPDK_CU_ASSERT_FATAL(req != NULL);
4613 
4614 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4615 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4616 
4617 	poll_thread_times(0, 1);
4618 
4619 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4620 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4621 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4622 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4623 
4624 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4625 	nvme_qpair1->qpair = NULL;
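	/* Simulate disconnect and deletion of io_path1's qpair so that the retry
	 * can only take io_path2.
	 */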
4626 
4627 	poll_threads();
4628 
4629 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4630 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4631 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4632 
4633 	free(bdev_io);
4634 
4635 	spdk_put_io_channel(ch);
4636 
4637 	poll_threads();
4638 
4639 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4640 	CU_ASSERT(rc == 0);
4641 
4642 	poll_threads();
4643 	spdk_delay_us(1000);
4644 	poll_threads();
4645 
4646 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4647 
4648 	g_opts.bdev_retry_count = 0;
4649 }
4650 
4651 static void
4652 test_retry_io_count(void)
4653 {
4654 	struct nvme_path_id path = {};
4655 	struct spdk_nvme_ctrlr *ctrlr;
4656 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4657 	struct nvme_ctrlr *nvme_ctrlr;
4658 	const int STRING_SIZE = 32;
4659 	const char *attached_names[STRING_SIZE];
4660 	struct nvme_bdev *bdev;
4661 	struct nvme_ns *nvme_ns;
4662 	struct spdk_bdev_io *bdev_io;
4663 	struct nvme_bdev_io *bio;
4664 	struct spdk_io_channel *ch;
4665 	struct nvme_bdev_channel *nbdev_ch;
4666 	struct nvme_io_path *io_path;
4667 	struct nvme_qpair *nvme_qpair;
4668 	struct ut_nvme_req *req;
4669 	int rc;
4670 
4671 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4672 	ut_init_trid(&path.trid);
4673 
4674 	set_thread(0);
4675 
4676 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4677 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4678 
4679 	g_ut_attach_ctrlr_status = 0;
4680 	g_ut_attach_bdev_count = 1;
4681 
4682 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4683 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4684 	CU_ASSERT(rc == 0);
4685 
4686 	spdk_delay_us(1000);
4687 	poll_threads();
4688 
4689 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4690 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4691 
4692 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4693 	CU_ASSERT(nvme_ctrlr != NULL);
4694 
4695 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4696 	CU_ASSERT(bdev != NULL);
4697 
4698 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4699 	CU_ASSERT(nvme_ns != NULL);
4700 
4701 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4702 	ut_bdev_io_set_buf(bdev_io);
4703 
4704 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4705 
4706 	ch = spdk_get_io_channel(bdev);
4707 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4708 
4709 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4710 
4711 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4712 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4713 
4714 	nvme_qpair = io_path->qpair;
4715 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4716 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4717 
4718 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4719 
4720 	/* If I/O is aborted by request, it should not be retried. */
4721 	g_opts.bdev_retry_count = 1;
4722 
4723 	bdev_io->internal.in_submit_request = true;
4724 
4725 	bdev_nvme_submit_request(ch, bdev_io);
4726 
4727 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4728 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4729 
4730 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4731 	SPDK_CU_ASSERT_FATAL(req != NULL);
4732 
4733 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4734 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4735 
4736 	poll_thread_times(0, 1);
4737 
4738 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4739 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4740 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
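	/* SPDK_NVME_SC_ABORTED_BY_REQUEST completes as SPDK_BDEV_IO_STATUS_ABORTED
	 * and is not retried even though bdev_retry_count is 1.
	 */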
4741 
4742 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4743 	 * the failed I/O should not be retried.
4744 	 */
4745 	g_opts.bdev_retry_count = 4;
4746 
4747 	bdev_io->internal.in_submit_request = true;
4748 
4749 	bdev_nvme_submit_request(ch, bdev_io);
4750 
4751 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4752 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4753 
4754 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4755 	SPDK_CU_ASSERT_FATAL(req != NULL);
4756 
4757 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4758 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4759 	bio->retry_count = 4;
4760 
4761 	poll_thread_times(0, 1);
4762 
4763 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4764 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4765 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4766 
	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
4768 	g_opts.bdev_retry_count = -1;
4769 
4770 	bdev_io->internal.in_submit_request = true;
4771 
4772 	bdev_nvme_submit_request(ch, bdev_io);
4773 
4774 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4775 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4776 
4777 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4778 	SPDK_CU_ASSERT_FATAL(req != NULL);
4779 
4780 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4781 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4782 	bio->retry_count = 4;
4783 
4784 	poll_thread_times(0, 1);
4785 
4786 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4787 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4788 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4789 
4790 	poll_threads();
4791 
4792 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4793 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4794 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4795 
4796 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4797 	 * the failed I/O should be retried.
4798 	 */
4799 	g_opts.bdev_retry_count = 4;
4800 
4801 	bdev_io->internal.in_submit_request = true;
4802 
4803 	bdev_nvme_submit_request(ch, bdev_io);
4804 
4805 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4806 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4807 
4808 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4809 	SPDK_CU_ASSERT_FATAL(req != NULL);
4810 
4811 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4812 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4813 	bio->retry_count = 3;
4814 
4815 	poll_thread_times(0, 1);
4816 
4817 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4818 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4819 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4820 
4821 	poll_threads();
4822 
4823 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4824 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4825 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4826 
4827 	free(bdev_io);
4828 
4829 	spdk_put_io_channel(ch);
4830 
4831 	poll_threads();
4832 
4833 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4834 	CU_ASSERT(rc == 0);
4835 
4836 	poll_threads();
4837 	spdk_delay_us(1000);
4838 	poll_threads();
4839 
4840 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4841 
4842 	g_opts.bdev_retry_count = 0;
4843 }
4844 
4845 static void
4846 test_concurrent_read_ana_log_page(void)
4847 {
4848 	struct spdk_nvme_transport_id trid = {};
4849 	struct spdk_nvme_ctrlr *ctrlr;
4850 	struct nvme_ctrlr *nvme_ctrlr;
4851 	const int STRING_SIZE = 32;
4852 	const char *attached_names[STRING_SIZE];
4853 	int rc;
4854 
4855 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4856 	ut_init_trid(&trid);
4857 
4858 	set_thread(0);
4859 
4860 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4861 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4862 
4863 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4864 
4865 	g_ut_attach_ctrlr_status = 0;
4866 	g_ut_attach_bdev_count = 1;
4867 
4868 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
4869 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4870 	CU_ASSERT(rc == 0);
4871 
4872 	spdk_delay_us(1000);
4873 	poll_threads();
4874 
4875 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4876 	poll_threads();
4877 
4878 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4879 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4880 
4881 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4882 
4883 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4884 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
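	/* ana_log_page_updating stays set while the read is outstanding and
	 * guards against concurrent readers.
	 */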
4885 
	/* A subsequent read request should be rejected. */
4887 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4888 
4889 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4890 
4891 	set_thread(1);
4892 
4893 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4894 
4895 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4896 
	/* A reset request issued while the ANA log page is being read should not be rejected. */
4898 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
4899 	CU_ASSERT(rc == 0);
4900 
4901 	poll_threads();
4902 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4903 	poll_threads();
4904 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4905 	poll_threads();
4906 
4907 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4908 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4909 
	/* Reading the ANA log page while the ctrlr is resetting should be rejected. */
4911 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
4912 	CU_ASSERT(rc == 0);
4913 
4914 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4915 
4916 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4917 
4918 	poll_threads();
4919 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4920 	poll_threads();
4921 
4922 	set_thread(0);
4923 
4924 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4925 	CU_ASSERT(rc == 0);
4926 
4927 	poll_threads();
4928 	spdk_delay_us(1000);
4929 	poll_threads();
4930 
4931 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4932 }
4933 
4934 static void
4935 test_retry_io_for_ana_error(void)
4936 {
4937 	struct nvme_path_id path = {};
4938 	struct spdk_nvme_ctrlr *ctrlr;
4939 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4940 	struct nvme_ctrlr *nvme_ctrlr;
4941 	const int STRING_SIZE = 32;
4942 	const char *attached_names[STRING_SIZE];
4943 	struct nvme_bdev *bdev;
4944 	struct nvme_ns *nvme_ns;
4945 	struct spdk_bdev_io *bdev_io;
4946 	struct nvme_bdev_io *bio;
4947 	struct spdk_io_channel *ch;
4948 	struct nvme_bdev_channel *nbdev_ch;
4949 	struct nvme_io_path *io_path;
4950 	struct nvme_qpair *nvme_qpair;
4951 	struct ut_nvme_req *req;
4952 	uint64_t now;
4953 	int rc;
4954 
4955 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4956 	ut_init_trid(&path.trid);
4957 
4958 	g_opts.bdev_retry_count = 1;
4959 
4960 	set_thread(0);
4961 
4962 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
4963 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4964 
4965 	g_ut_attach_ctrlr_status = 0;
4966 	g_ut_attach_bdev_count = 1;
4967 
4968 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4969 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4970 	CU_ASSERT(rc == 0);
4971 
4972 	spdk_delay_us(1000);
4973 	poll_threads();
4974 
4975 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4976 	poll_threads();
4977 
4978 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4979 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4980 
4981 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4982 	CU_ASSERT(nvme_ctrlr != NULL);
4983 
4984 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4985 	CU_ASSERT(bdev != NULL);
4986 
4987 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4988 	CU_ASSERT(nvme_ns != NULL);
4989 
4990 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4991 	ut_bdev_io_set_buf(bdev_io);
4992 
4993 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4994 
4995 	ch = spdk_get_io_channel(bdev);
4996 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4997 
4998 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4999 
5000 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5001 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5002 
5003 	nvme_qpair = io_path->qpair;
5004 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5005 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5006 
5007 	now = spdk_get_ticks();
5008 
5009 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
5010 
	/* If an I/O gets an ANA error, it should be queued, the corresponding
	 * namespace should be frozen, and its ANA state should be updated.
	 */
5014 	bdev_io->internal.in_submit_request = true;
5015 
5016 	bdev_nvme_submit_request(ch, bdev_io);
5017 
5018 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5019 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5020 
5021 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5022 	SPDK_CU_ASSERT_FATAL(req != NULL);
5023 
5024 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5025 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
5026 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
5027 
5028 	poll_thread_times(0, 1);
5029 
5030 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5031 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5032 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5033 	/* I/O should be retried immediately. */
5034 	CU_ASSERT(bio->retry_ticks == now);
5035 	CU_ASSERT(nvme_ns->ana_state_updating == true);
5036 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5037 
5038 	poll_threads();
5039 
	/* The namespace is inaccessible, and hence the I/O should be queued again. */
5041 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5042 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5043 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
	/* The I/O should be retried after one second when no I/O path is currently
	 * found but one may become available.
	 */
5047 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
5048 
	/* The namespace should be unfrozen once its ANA state update completes. */
5050 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5051 	poll_threads();
5052 
5053 	CU_ASSERT(nvme_ns->ana_state_updating == false);
5054 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5055 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5056 
	/* Retrying the queued I/O should succeed. */
5058 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
5059 	poll_threads();
5060 
5061 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5062 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5063 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5064 
5065 	free(bdev_io);
5066 
5067 	spdk_put_io_channel(ch);
5068 
5069 	poll_threads();
5070 
5071 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5072 	CU_ASSERT(rc == 0);
5073 
5074 	poll_threads();
5075 	spdk_delay_us(1000);
5076 	poll_threads();
5077 
5078 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5079 
5080 	g_opts.bdev_retry_count = 0;
5081 }
5082 
5083 static void
5084 test_check_io_error_resiliency_params(void)
5085 {
5086 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5087 	 * 3rd parameter is fast_io_fail_timeout_sec.
5088 	 */
5089 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
5090 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
5091 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
5092 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
5093 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
5094 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
5095 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
5096 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
5097 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
5098 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
5099 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
5100 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
5101 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
5102 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
5103 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
5104 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
5105 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
5106 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5107 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
5108 }
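
/* The assertions above encode, roughly, the following constraints. This is a
 * reconstruction from the expected results, not necessarily the exact
 * bdev_nvme_check_io_error_resiliency_params() implementation:
 *
 *	if (ctrlr_loss_timeout_sec < -1) {
 *		return false;
 *	} else if (ctrlr_loss_timeout_sec == 0) {
 *		if (reconnect_delay_sec != 0 || fast_io_fail_timeout_sec != 0) {
 *			return false;
 *		}
 *	} else {
 *		if (reconnect_delay_sec == 0) {
 *			return false;
 *		}
 *		if (ctrlr_loss_timeout_sec > 0 &&
 *		    reconnect_delay_sec > (uint32_t)ctrlr_loss_timeout_sec) {
 *			return false;
 *		}
 *		if (fast_io_fail_timeout_sec != 0 &&
 *		    (reconnect_delay_sec > fast_io_fail_timeout_sec ||
 *		     (ctrlr_loss_timeout_sec > 0 &&
 *		      fast_io_fail_timeout_sec > (uint32_t)ctrlr_loss_timeout_sec))) {
 *			return false;
 *		}
 *	}
 *	return true;
 */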
5109 
5110 static void
5111 test_retry_io_if_ctrlr_is_resetting(void)
5112 {
5113 	struct nvme_path_id path = {};
5114 	struct nvme_ctrlr_opts opts = {};
5115 	struct spdk_nvme_ctrlr *ctrlr;
5116 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5117 	struct nvme_ctrlr *nvme_ctrlr;
5118 	const int STRING_SIZE = 32;
5119 	const char *attached_names[STRING_SIZE];
5120 	struct nvme_bdev *bdev;
5121 	struct nvme_ns *nvme_ns;
5122 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5123 	struct spdk_io_channel *ch;
5124 	struct nvme_bdev_channel *nbdev_ch;
5125 	struct nvme_io_path *io_path;
5126 	struct nvme_qpair *nvme_qpair;
5127 	int rc;
5128 
5129 	g_opts.bdev_retry_count = 1;
5130 
5131 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5132 	ut_init_trid(&path.trid);
5133 
5134 	set_thread(0);
5135 
5136 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5137 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5138 
5139 	g_ut_attach_ctrlr_status = 0;
5140 	g_ut_attach_bdev_count = 1;
5141 
5142 	opts.ctrlr_loss_timeout_sec = -1;
5143 	opts.reconnect_delay_sec = 1;
5144 
5145 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5146 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5147 	CU_ASSERT(rc == 0);
5148 
5149 	spdk_delay_us(1000);
5150 	poll_threads();
5151 
5152 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5153 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5154 
5155 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5156 	CU_ASSERT(nvme_ctrlr != NULL);
5157 
5158 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5159 	CU_ASSERT(bdev != NULL);
5160 
5161 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5162 	CU_ASSERT(nvme_ns != NULL);
5163 
5164 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5165 	ut_bdev_io_set_buf(bdev_io1);
5166 
5167 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5168 	ut_bdev_io_set_buf(bdev_io2);
5169 
5170 	ch = spdk_get_io_channel(bdev);
5171 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5172 
5173 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5174 
5175 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5176 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5177 
5178 	nvme_qpair = io_path->qpair;
5179 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5180 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5181 
5182 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5183 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5184 
5185 	/* If qpair is connected, I/O should succeed. */
5186 	bdev_io1->internal.in_submit_request = true;
5187 
5188 	bdev_nvme_submit_request(ch, bdev_io1);
5189 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5190 
5191 	poll_threads();
5192 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5193 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5194 
5195 	/* If qpair is disconnected, it is freed and then reconnected via resetting
5196 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
5197 	 * while resetting the nvme_ctrlr.
5198 	 */
5199 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5200 	ctrlr->is_failed = true;
5201 
5202 	poll_thread_times(0, 5);
5203 
5204 	CU_ASSERT(nvme_qpair->qpair == NULL);
5205 	CU_ASSERT(nvme_ctrlr->resetting == true);
5206 	CU_ASSERT(ctrlr->is_failed == false);
5207 
5208 	bdev_io1->internal.in_submit_request = true;
5209 
5210 	bdev_nvme_submit_request(ch, bdev_io1);
5211 
5212 	spdk_delay_us(1);
5213 
5214 	bdev_io2->internal.in_submit_request = true;
5215 
5216 	bdev_nvme_submit_request(ch, bdev_io2);
5217 
5218 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5219 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5220 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5221 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
5222 
5223 	poll_threads();
5224 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5225 	poll_threads();
5226 
5227 	CU_ASSERT(nvme_qpair->qpair != NULL);
5228 	CU_ASSERT(nvme_ctrlr->resetting == false);
5229 
5230 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5231 
5232 	poll_thread_times(0, 1);
5233 
5234 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5235 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5236 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5237 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5238 
5239 	poll_threads();
5240 
5241 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5242 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5243 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5244 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5245 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5246 
5247 	spdk_delay_us(1);
5248 
5249 	poll_thread_times(0, 1);
5250 
5251 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5252 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5253 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5254 
5255 	poll_threads();
5256 
5257 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5258 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
5259 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5260 
5261 	free(bdev_io1);
5262 	free(bdev_io2);
5263 
5264 	spdk_put_io_channel(ch);
5265 
5266 	poll_threads();
5267 
5268 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5269 	CU_ASSERT(rc == 0);
5270 
5271 	poll_threads();
5272 	spdk_delay_us(1000);
5273 	poll_threads();
5274 
5275 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5276 
5277 	g_opts.bdev_retry_count = 0;
5278 }
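
/* A simplified sketch of the retry poller behavior exercised above (not the
 * exact bdev_nvme.c code): queued I/Os stay on nbdev_ch->retry_io_list in
 * submission order and are re-submitted one by one once their retry_ticks
 * deadline has passed:
 *
 *	while ((bdev_io = TAILQ_FIRST(&nbdev_ch->retry_io_list)) != NULL) {
 *		bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
 *		if (bio->retry_ticks > spdk_get_ticks()) {
 *			break;	(head is not due yet; keep the poller armed)
 *		}
 *		TAILQ_REMOVE(&nbdev_ch->retry_io_list, bdev_io, module_link);
 *		bdev_nvme_submit_request(spdk_io_channel_from_ctx(nbdev_ch), bdev_io);
 *	}
 */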
5279 
5280 static void
5281 test_reconnect_ctrlr(void)
5282 {
5283 	struct spdk_nvme_transport_id trid = {};
5284 	struct spdk_nvme_ctrlr ctrlr = {};
5285 	struct nvme_ctrlr *nvme_ctrlr;
5286 	struct spdk_io_channel *ch1, *ch2;
5287 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5288 	int rc;
5289 
5290 	ut_init_trid(&trid);
5291 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5292 
5293 	set_thread(0);
5294 
5295 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5296 	CU_ASSERT(rc == 0);
5297 
5298 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5299 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5300 
5301 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5302 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5303 
5304 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5305 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5306 
5307 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5308 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5309 
5310 	set_thread(1);
5311 
5312 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5313 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5314 
5315 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5316 
5317 	/* Reset starts from thread 1. */
5318 	set_thread(1);
5319 
5320 	/* The reset should fail and a reconnect timer should be registered. */
5321 	ctrlr.fail_reset = true;
5322 	ctrlr.is_failed = true;
5323 
5324 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5325 	CU_ASSERT(rc == 0);
5326 	CU_ASSERT(nvme_ctrlr->resetting == true);
5327 	CU_ASSERT(ctrlr.is_failed == true);
5328 
5329 	poll_threads();
5330 
5331 	CU_ASSERT(nvme_ctrlr->resetting == false);
5332 	CU_ASSERT(ctrlr.is_failed == false);
5333 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5334 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5335 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5336 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5337 
5338 	/* A new reset starts from thread 1. */
5339 	set_thread(1);
5340 
5341 	/* The reset should cancel the reconnect timer and start reconnecting immediately.
5342 	 * Then, the reset should fail and a reconnect timer should be registered again.
5343 	 */
5344 	ctrlr.fail_reset = true;
5345 	ctrlr.is_failed = true;
5346 
5347 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5348 	CU_ASSERT(rc == 0);
5349 	CU_ASSERT(nvme_ctrlr->resetting == true);
5350 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5351 	CU_ASSERT(ctrlr.is_failed == true);
5352 
5353 	poll_threads();
5354 
5355 	CU_ASSERT(nvme_ctrlr->resetting == false);
5356 	CU_ASSERT(ctrlr.is_failed == false);
5357 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5358 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5359 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5360 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5361 
5362 	/* Then a reconnect retry should succeed. */
5363 	ctrlr.fail_reset = false;
5364 
5365 	spdk_delay_us(SPDK_SEC_TO_USEC);
5366 	poll_thread_times(0, 1);
5367 
5368 	CU_ASSERT(nvme_ctrlr->resetting == true);
5369 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5370 
5371 	poll_threads();
5372 
5373 	CU_ASSERT(nvme_ctrlr->resetting == false);
5374 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5375 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5376 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5377 
5378 	/* The reset should fail and a reconnect timer should be registered. */
5379 	ctrlr.fail_reset = true;
5380 	ctrlr.is_failed = true;
5381 
5382 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5383 	CU_ASSERT(rc == 0);
5384 	CU_ASSERT(nvme_ctrlr->resetting == true);
5385 	CU_ASSERT(ctrlr.is_failed == true);
5386 
5387 	poll_threads();
5388 
5389 	CU_ASSERT(nvme_ctrlr->resetting == false);
5390 	CU_ASSERT(ctrlr.is_failed == false);
5391 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5392 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5393 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5394 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5395 
5396 	/* Then a reconnect retry should still fail. */
5397 	spdk_delay_us(SPDK_SEC_TO_USEC);
5398 	poll_thread_times(0, 1);
5399 
5400 	CU_ASSERT(nvme_ctrlr->resetting == true);
5401 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5402 
5403 	poll_threads();
5404 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5405 	poll_threads();
5406 
5407 	CU_ASSERT(nvme_ctrlr->resetting == false);
5408 	CU_ASSERT(ctrlr.is_failed == false);
5409 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5410 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5411 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5412 
5413 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5414 	spdk_delay_us(SPDK_SEC_TO_USEC);
5415 	poll_threads();
5416 
5417 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5418 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5419 	CU_ASSERT(nvme_ctrlr->destruct == true);
5420 
5421 	spdk_put_io_channel(ch2);
5422 
5423 	set_thread(0);
5424 
5425 	spdk_put_io_channel(ch1);
5426 
5427 	poll_threads();
5428 	spdk_delay_us(1000);
5429 	poll_threads();
5430 
5431 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5432 }
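
/* Summary of the reconnect flow exercised above, with ctrlr_loss_timeout_sec = 2
 * and reconnect_delay_sec = 1 (inferred from the assertions):
 *
 *	reset fails        -> reconnect_delay_timer is armed for reconnect_delay_sec
 *	timer fires        -> resetting = true and a reconnect is attempted
 *	reconnect succeeds -> qpairs are recreated and the timer stays unarmed
 *	reconnect fails    -> the timer is re-armed, until ctrlr_loss_timeout_sec
 *	                      has elapsed since reset_start_tsc, at which point
 *	                      the ctrlr is destructed
 */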
5433 
5434 static struct nvme_path_id *
5435 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5436 		       const struct spdk_nvme_transport_id *trid)
5437 {
5438 	struct nvme_path_id *p;
5439 
5440 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5441 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5442 			break;
5443 		}
5444 	}
5445 
5446 	return p;
5447 }
5448 
5449 static void
5450 test_retry_failover_ctrlr(void)
5451 {
5452 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5453 	struct spdk_nvme_ctrlr ctrlr = {};
5454 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5455 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5456 	struct spdk_io_channel *ch;
5457 	struct nvme_ctrlr_channel *ctrlr_ch;
5458 	int rc;
5459 
5460 	ut_init_trid(&trid1);
5461 	ut_init_trid2(&trid2);
5462 	ut_init_trid3(&trid3);
5463 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5464 
5465 	set_thread(0);
5466 
5467 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5468 	CU_ASSERT(rc == 0);
5469 
5470 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5471 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5472 
5473 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5474 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5475 
5476 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5477 	CU_ASSERT(rc == 0);
5478 
5479 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5480 	CU_ASSERT(rc == 0);
5481 
5482 	ch = spdk_get_io_channel(nvme_ctrlr);
5483 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5484 
5485 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5486 
5487 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5488 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5489 	CU_ASSERT(path_id1->last_failed_tsc == 0);
5490 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5491 
5492 	/* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */
5493 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5494 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5495 
5496 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5497 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5498 
5499 	/* It is expected that connecting to all of trid1, trid2, and trid3 fails,
5500 	 * and a reconnect timer is started. */
5501 	ctrlr.fail_reset = true;
5502 	ctrlr.is_failed = true;
5503 
5504 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5505 	CU_ASSERT(rc == 0);
5506 
5507 	poll_threads();
5508 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5509 	poll_threads();
5510 
5511 	CU_ASSERT(nvme_ctrlr->resetting == false);
5512 	CU_ASSERT(ctrlr.is_failed == false);
5513 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5514 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5515 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5516 	CU_ASSERT(path_id1->last_failed_tsc != 0);
5517 
5518 	CU_ASSERT(path_id2->last_failed_tsc != 0);
5519 	CU_ASSERT(path_id3->last_failed_tsc != 0);
5520 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5521 
5522 	/* If trid1 is removed while a reconnect is scheduled, the active path_id is
5523 	 * switched to trid2 but a reset is not started.
5524 	 */
5525 	rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true);
5526 	CU_ASSERT(rc == -EALREADY);
5527 
5528 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL);
5529 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5530 
5531 	CU_ASSERT(nvme_ctrlr->resetting == false);
5532 
5533 	/* If reconnect succeeds, trid2 should be the active path_id. */
5534 	ctrlr.fail_reset = false;
5535 
5536 	spdk_delay_us(SPDK_SEC_TO_USEC);
5537 	poll_thread_times(0, 1);
5538 
5539 	CU_ASSERT(nvme_ctrlr->resetting == true);
5540 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5541 
5542 	poll_threads();
5543 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5544 	poll_threads();
5545 
5546 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL);
5547 	CU_ASSERT(path_id2->last_failed_tsc == 0);
5548 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5549 	CU_ASSERT(nvme_ctrlr->resetting == false);
5550 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5551 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5552 
5553 	spdk_put_io_channel(ch);
5554 
5555 	poll_threads();
5556 
5557 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5558 	CU_ASSERT(rc == 0);
5559 
5560 	poll_threads();
5561 	spdk_delay_us(1000);
5562 	poll_threads();
5563 
5564 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5565 }
5566 
5567 static void
5568 test_fail_path(void)
5569 {
5570 	struct nvme_path_id path = {};
5571 	struct nvme_ctrlr_opts opts = {};
5572 	struct spdk_nvme_ctrlr *ctrlr;
5573 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5574 	struct nvme_ctrlr *nvme_ctrlr;
5575 	const int STRING_SIZE = 32;
5576 	const char *attached_names[STRING_SIZE];
5577 	struct nvme_bdev *bdev;
5578 	struct nvme_ns *nvme_ns;
5579 	struct spdk_bdev_io *bdev_io;
5580 	struct spdk_io_channel *ch;
5581 	struct nvme_bdev_channel *nbdev_ch;
5582 	struct nvme_io_path *io_path;
5583 	struct nvme_ctrlr_channel *ctrlr_ch;
5584 	int rc;
5585 
5586 	/* The test scenario is the following.
5587 	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5588 	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
5589 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5590 	 * - The I/O waits until the ctrlr is recovered, but fast_io_fail_timeout_sec
5591 	 *   expires first. The queued I/O is failed.
5592 	 * - After fast_io_fail_timeout_sec expires, any new I/O is failed immediately.
5593 	 * - Then ctrlr_loss_timeout_sec expires and the ctrlr is deleted.
5594 	 */
5595 
5596 	g_opts.bdev_retry_count = 1;
5597 
5598 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5599 	ut_init_trid(&path.trid);
5600 
5601 	set_thread(0);
5602 
5603 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5604 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5605 
5606 	g_ut_attach_ctrlr_status = 0;
5607 	g_ut_attach_bdev_count = 1;
5608 
5609 	opts.ctrlr_loss_timeout_sec = 4;
5610 	opts.reconnect_delay_sec = 1;
5611 	opts.fast_io_fail_timeout_sec = 2;
5612 
5613 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5614 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5615 	CU_ASSERT(rc == 0);
5616 
5617 	spdk_delay_us(1000);
5618 	poll_threads();
5619 
5620 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5621 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5622 
5623 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5624 	CU_ASSERT(nvme_ctrlr != NULL);
5625 
5626 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5627 	CU_ASSERT(bdev != NULL);
5628 
5629 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5630 	CU_ASSERT(nvme_ns != NULL);
5631 
5632 	ch = spdk_get_io_channel(bdev);
5633 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5634 
5635 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5636 
5637 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5638 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5639 
5640 	ctrlr_ch = io_path->qpair->ctrlr_ch;
5641 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5642 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
5643 
5644 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
5645 	ut_bdev_io_set_buf(bdev_io);
5646 
5648 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
5649 	ctrlr->fail_reset = true;
5650 	ctrlr->is_failed = true;
5651 
5652 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5653 	CU_ASSERT(rc == 0);
5654 	CU_ASSERT(nvme_ctrlr->resetting == true);
5655 	CU_ASSERT(ctrlr->is_failed == true);
5656 
5657 	poll_threads();
5658 
5659 	CU_ASSERT(nvme_ctrlr->resetting == false);
5660 	CU_ASSERT(ctrlr->is_failed == false);
5661 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5662 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5663 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
5664 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5665 
5666 	/* I/O should be queued. */
5667 	bdev_io->internal.in_submit_request = true;
5668 
5669 	bdev_nvme_submit_request(ch, bdev_io);
5670 
5671 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5672 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5673 
5674 	/* After a second, the I/O should still be queued and the ctrlr should
5675 	 * still be recovering.
5676 	 */
5677 	spdk_delay_us(SPDK_SEC_TO_USEC);
5678 	poll_threads();
5679 
5680 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5681 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5682 
5683 	CU_ASSERT(nvme_ctrlr->resetting == false);
5684 	CU_ASSERT(ctrlr->is_failed == false);
5685 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5686 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5687 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5688 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5691 
5692 	/* After two seconds, fast_io_fail_timeout_sec should expire. */
5693 	spdk_delay_us(SPDK_SEC_TO_USEC);
5694 	poll_threads();
5695 
5696 	CU_ASSERT(nvme_ctrlr->resetting == false);
5697 	CU_ASSERT(ctrlr->is_failed == false);
5698 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5699 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5700 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5701 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
5702 
5703 	/* Then within a second, pending I/O should be failed. */
5704 	spdk_delay_us(SPDK_SEC_TO_USEC);
5705 	poll_threads();
5706 
5707 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5708 	poll_threads();
5709 
5710 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5711 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5712 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5713 
5714 	/* Another I/O submission should be failed immediately. */
5715 	bdev_io->internal.in_submit_request = true;
5716 
5717 	bdev_nvme_submit_request(ch, bdev_io);
5718 
5719 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5720 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5721 
5722 	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr should
5723 	 * be deleted.
5724 	 */
5725 	spdk_delay_us(SPDK_SEC_TO_USEC);
5726 	poll_threads();
5727 
5728 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5729 	poll_threads();
5730 
5731 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5732 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5733 	CU_ASSERT(nvme_ctrlr->destruct == true);
5734 
5735 	spdk_put_io_channel(ch);
5736 
5737 	poll_threads();
5738 	spdk_delay_us(1000);
5739 	poll_threads();
5740 
5741 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5742 
5743 	free(bdev_io);
5744 
5745 	g_opts.bdev_retry_count = 0;
5746 }
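
/* Timeline of the scenario above, with reconnect_delay_sec = 1,
 * fast_io_fail_timeout_sec = 2, and ctrlr_loss_timeout_sec = 4 (t is measured
 * in seconds from reset_start_tsc):
 *
 *	t = 0	reset fails, the reconnect timer is armed, and the I/O is queued
 *	t = 1	reconnect is retried and fails again; the I/O is still queued
 *	t = 2	fast_io_fail_timedout becomes true
 *	t = 3	the queued I/O completes with SPDK_BDEV_IO_STATUS_FAILED and any
 *		new I/O fails immediately
 *	t = 4	ctrlr_loss_timeout_sec expires and the ctrlr is destructed
 */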
5747 
5748 static void
5749 test_nvme_ns_cmp(void)
5750 {
5751 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
5752 
5753 	nvme_ns1.id = 0;
5754 	nvme_ns2.id = UINT32_MAX;
5755 
5756 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
5757 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
5758 }
5759 
5760 static void
5761 test_ana_transition(void)
5762 {
5763 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
5764 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
5765 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
5766 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
5767 
5768 	/* case 1: the ana_transition_timedout flag is cleared. */
5769 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5770 	nvme_ns.ana_transition_timedout = true;
5771 
5772 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5773 
5774 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5775 
5776 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
5777 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5778 
5779 	/* case 2: ANATT timer is kept. */
5780 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5781 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
5782 			      &nvme_ns,
5783 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5784 
5785 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5786 
5787 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5788 
5789 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5790 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
5791 
5792 	/* case 3: ANATT timer is stopped. */
5793 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5794 
5795 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5796 
5797 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5798 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5799 
5800 	/* case 4: ANATT timer is started. */
5801 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5802 
5803 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5804 
5805 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5806 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
5807 
5808 	/* case 5: ANATT timer expires. */
5809 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5810 
5811 	poll_threads();
5812 
5813 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5814 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
5815 }
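
/* The ANATT timer handling exercised above can be summarized as follows
 * (inferred from the assertions; simplified relative to _nvme_ns_set_ana_state()):
 *
 *	- New state is OPTIMIZED or NON_OPTIMIZED: stop anatt_timer and clear
 *	  ana_transition_timedout.
 *	- New state is CHANGE: start anatt_timer for cdata.anatt seconds unless
 *	  it is already running.
 *	- Other states (e.g. INACCESSIBLE): a running anatt_timer is kept.
 *	- Timer expiry: unregister anatt_timer and set ana_transition_timedout.
 */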
5816 
5817 static void
5818 _set_preferred_path_cb(void *cb_arg, int rc)
5819 {
5820 	bool *done = cb_arg;
5821 
5822 	*done = true;
5823 }
5824 
5825 static void
5826 test_set_preferred_path(void)
5827 {
5828 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
5829 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
5830 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5831 	const int STRING_SIZE = 32;
5832 	const char *attached_names[STRING_SIZE];
5833 	struct nvme_bdev *bdev;
5834 	struct spdk_io_channel *ch;
5835 	struct nvme_bdev_channel *nbdev_ch;
5836 	struct nvme_io_path *io_path;
5837 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
5838 	const struct spdk_nvme_ctrlr_data *cdata;
5839 	bool done;
5840 	int rc;
5841 
5842 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5843 	ut_init_trid(&path1.trid);
5844 	ut_init_trid2(&path2.trid);
5845 	ut_init_trid3(&path3.trid);
5846 	g_ut_attach_ctrlr_status = 0;
5847 	g_ut_attach_bdev_count = 1;
5848 
5849 	set_thread(0);
5850 
5851 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
5852 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
5853 
5854 	ctrlr1->ns[0].uuid = &uuid1;
5855 
5856 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
5857 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5858 	CU_ASSERT(rc == 0);
5859 
5860 	spdk_delay_us(1000);
5861 	poll_threads();
5862 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5863 	poll_threads();
5864 
5865 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
5866 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
5867 
5868 	ctrlr2->ns[0].uuid = &uuid1;
5869 
5870 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
5871 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5872 	CU_ASSERT(rc == 0);
5873 
5874 	spdk_delay_us(1000);
5875 	poll_threads();
5876 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5877 	poll_threads();
5878 
5879 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
5880 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
5881 
5882 	ctrlr3->ns[0].uuid = &uuid1;
5883 
5884 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
5885 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5886 	CU_ASSERT(rc == 0);
5887 
5888 	spdk_delay_us(1000);
5889 	poll_threads();
5890 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5891 	poll_threads();
5892 
5893 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5894 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5895 
5896 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5897 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
5898 
5899 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
5900 
5901 	ch = spdk_get_io_channel(bdev);
5902 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5903 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5904 
5905 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5906 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5907 
5908 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
5909 
5910 	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
5911 	 * should return io_path to ctrlr2.
5912 	 */
5913 
5914 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
5915 	done = false;
5916 
5917 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5918 
5919 	poll_threads();
5920 	CU_ASSERT(done == true);
5921 
5922 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5923 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5924 
5925 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
5926 
5927 	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
5928 	 * acquired, find_io_path() should return io_path to ctrlr3.
5929 	 */
5930 
5931 	spdk_put_io_channel(ch);
5932 
5933 	poll_threads();
5934 
5935 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
5936 	done = false;
5937 
5938 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5939 
5940 	poll_threads();
5941 	CU_ASSERT(done == true);
5942 
5943 	ch = spdk_get_io_channel(bdev);
5944 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5945 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5946 
5947 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5948 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5949 
5950 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
5951 
5952 	spdk_put_io_channel(ch);
5953 
5954 	poll_threads();
5955 
5956 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5957 	CU_ASSERT(rc == 0);
5958 
5959 	poll_threads();
5960 	spdk_delay_us(1000);
5961 	poll_threads();
5962 
5963 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5964 }
5965 
5966 static void
5967 test_find_next_io_path(void)
5968 {
5969 	struct nvme_bdev_channel nbdev_ch = {
5970 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
5971 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
5972 		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
5973 	};
5974 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
5975 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
5976 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
5977 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
5978 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
5979 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
5980 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
5981 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
5982 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
5983 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
5984 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
5985 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
5986 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
5987 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
5988 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
5989 
5990 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
5991 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
5992 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
5993 
5994 	/* Test the case where nbdev_ch->current_io_path is set. The case of
5995 	 * current_io_path == NULL is covered in test_find_io_path.
5996 	 */
5997 
5998 	nbdev_ch.current_io_path = &io_path2;
5999 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6000 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6001 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6002 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6003 
6004 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6005 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6006 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6007 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6008 
6009 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6010 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6011 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6012 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6013 
6014 	nbdev_ch.current_io_path = &io_path3;
6015 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6016 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6017 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6018 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6019 
6020 	/* Test if the next io_path is selected according to rr_min_io. */
6021 
6022 	nbdev_ch.current_io_path = NULL;
6023 	nbdev_ch.rr_min_io = 2;
6024 	nbdev_ch.rr_counter = 0;
6025 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6026 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6027 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6028 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6029 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6030 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6031 
6032 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6033 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6034 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6035 }
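
/* A minimal sketch of the round-robin accounting checked above. This is an
 * approximation inferred from the assertions, not the exact
 * bdev_nvme_find_io_path() code:
 *
 *	if (nbdev_ch->current_io_path == NULL ||
 *	    ++nbdev_ch->rr_counter == nbdev_ch->rr_min_io) {
 *		nbdev_ch->rr_counter = 0;
 *		(advance current_io_path to the next usable path in io_path_list,
 *		 wrapping around and preferring ANA optimized paths)
 *	}
 *	return nbdev_ch->current_io_path;
 */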
6036 
6037 static void
6038 test_find_io_path_min_qd(void)
6039 {
6040 	struct nvme_bdev_channel nbdev_ch = {
6041 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6042 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6043 		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
6044 	};
6045 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6046 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6047 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6048 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6049 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6050 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6051 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6052 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6053 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6054 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6055 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6056 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
6057 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6058 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6059 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6060 
6061 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6062 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6063 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6064 
6065 	/* Test that the minimum num_outstanding_reqs and the ANA optimized state
6066 	 * are prioritized when using the least queue depth selector.
6067 	 */
6068 	qpair1.num_outstanding_reqs = 2;
6069 	qpair2.num_outstanding_reqs = 1;
6070 	qpair3.num_outstanding_reqs = 0;
6071 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6072 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6073 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6074 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6075 
6076 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6077 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6078 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6079 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6080 
6081 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6082 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6083 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6084 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6085 
6086 	qpair2.num_outstanding_reqs = 4;
6087 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6088 }
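
/* A simplified sketch of the least queue depth selection exercised above
 * (inferred from the assertions, not the exact bdev_nvme.c code): among
 * usable paths, an ANA optimized path is preferred over a non-optimized one,
 * and ties are broken by the smallest qpair->num_outstanding_reqs:
 *
 *	best = NULL;
 *	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
 *		if (path is unusable, i.e. qpair disconnected or ns inaccessible) {
 *			continue;
 *		}
 *		if (best == NULL || io_path ranks better than best) {
 *			best = io_path;
 *		}
 *	}
 *	return best;
 */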
6089 
6090 static void
6091 test_disable_auto_failback(void)
6092 {
6093 	struct nvme_path_id path1 = {}, path2 = {};
6094 	struct nvme_ctrlr_opts opts = {};
6095 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6096 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6097 	struct nvme_ctrlr *nvme_ctrlr1;
6098 	const int STRING_SIZE = 32;
6099 	const char *attached_names[STRING_SIZE];
6100 	struct nvme_bdev *bdev;
6101 	struct spdk_io_channel *ch;
6102 	struct nvme_bdev_channel *nbdev_ch;
6103 	struct nvme_io_path *io_path;
6104 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6105 	const struct spdk_nvme_ctrlr_data *cdata;
6106 	bool done;
6107 	int rc;
6108 
6109 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6110 	ut_init_trid(&path1.trid);
6111 	ut_init_trid2(&path2.trid);
6112 	g_ut_attach_ctrlr_status = 0;
6113 	g_ut_attach_bdev_count = 1;
6114 
6115 	g_opts.disable_auto_failback = true;
6116 
6117 	opts.ctrlr_loss_timeout_sec = -1;
6118 	opts.reconnect_delay_sec = 1;
6119 
6120 	set_thread(0);
6121 
6122 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6123 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6124 
6125 	ctrlr1->ns[0].uuid = &uuid1;
6126 
6127 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6128 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6129 	CU_ASSERT(rc == 0);
6130 
6131 	spdk_delay_us(1000);
6132 	poll_threads();
6133 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6134 	poll_threads();
6135 
6136 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6137 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6138 
6139 	ctrlr2->ns[0].uuid = &uuid1;
6140 
6141 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6142 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6143 	CU_ASSERT(rc == 0);
6144 
6145 	spdk_delay_us(1000);
6146 	poll_threads();
6147 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6148 	poll_threads();
6149 
6150 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6151 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6152 
6153 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6154 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6155 
6156 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
6157 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6158 
6159 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6160 
6161 	ch = spdk_get_io_channel(bdev);
6162 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6163 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6164 
6165 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6166 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6167 
6168 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6169 
6170 	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
6171 	ctrlr1->fail_reset = true;
6172 	ctrlr1->is_failed = true;
6173 
6174 	bdev_nvme_reset_ctrlr(nvme_ctrlr1);
6175 
6176 	poll_threads();
6177 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6178 	poll_threads();
6179 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6180 	poll_threads();
6181 
6182 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6183 
6184 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6185 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6186 
6187 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6188 
6189 	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
6190 	 * Hence, io_path to ctrlr2 should still be used.
6191 	 */
6192 	ctrlr1->fail_reset = false;
6193 
6194 	spdk_delay_us(SPDK_SEC_TO_USEC);
6195 	poll_threads();
6196 
6197 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6198 
6199 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6200 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6201 
6202 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6203 
6204 	/* Explicitly set the io_path to ctrlr1 as preferred. Then the io_path to
6205 	 * ctrlr1 should be used again.
6206 	 */
6207 
6208 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6209 	done = false;
6210 
6211 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6212 
6213 	poll_threads();
6214 	CU_ASSERT(done == true);
6215 
6216 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6217 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6218 
6219 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6220 
6221 	spdk_put_io_channel(ch);
6222 
6223 	poll_threads();
6224 
6225 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6226 	CU_ASSERT(rc == 0);
6227 
6228 	poll_threads();
6229 	spdk_delay_us(1000);
6230 	poll_threads();
6231 
6232 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6233 
6234 	g_opts.disable_auto_failback = false;
6235 }
6236 
6237 static void
6238 ut_set_multipath_policy_done(void *cb_arg, int rc)
6239 {
6240 	int *done = cb_arg;
6241 
6242 	SPDK_CU_ASSERT_FATAL(done != NULL);
6243 	*done = rc;
6244 }
6245 
6246 static void
6247 test_set_multipath_policy(void)
6248 {
6249 	struct nvme_path_id path1 = {}, path2 = {};
6250 	struct nvme_ctrlr_opts opts = {};
6251 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6252 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6253 	const int STRING_SIZE = 32;
6254 	const char *attached_names[STRING_SIZE];
6255 	struct nvme_bdev *bdev;
6256 	struct spdk_io_channel *ch;
6257 	struct nvme_bdev_channel *nbdev_ch;
6258 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6259 	int done;
6260 	int rc;
6261 
6262 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6263 	ut_init_trid(&path1.trid);
6264 	ut_init_trid2(&path2.trid);
6265 	g_ut_attach_ctrlr_status = 0;
6266 	g_ut_attach_bdev_count = 1;
6267 
6268 	g_opts.disable_auto_failback = true;
6269 
6270 	opts.ctrlr_loss_timeout_sec = -1;
6271 	opts.reconnect_delay_sec = 1;
6272 
6273 	set_thread(0);
6274 
6275 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6276 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6277 
6278 	ctrlr1->ns[0].uuid = &uuid1;
6279 
6280 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6281 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6282 	CU_ASSERT(rc == 0);
6283 
6284 	spdk_delay_us(1000);
6285 	poll_threads();
6286 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6287 	poll_threads();
6288 
6289 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6290 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6291 
6292 	ctrlr2->ns[0].uuid = &uuid1;
6293 
6294 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6295 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6296 	CU_ASSERT(rc == 0);
6297 
6298 	spdk_delay_us(1000);
6299 	poll_threads();
6300 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6301 	poll_threads();
6302 
6303 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6304 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6305 
6306 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6307 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6308 
6309 	/* If multipath policy is updated before getting any I/O channel,
6310 	 * a new I/O channel should have the update.
6311 	 */
6312 	done = -1;
6313 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6314 				       BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX,
6315 				       ut_set_multipath_policy_done, &done);
6316 	poll_threads();
6317 	CU_ASSERT(done == 0);
6318 
6319 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6320 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6321 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6322 
6323 	ch = spdk_get_io_channel(bdev);
6324 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6325 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6326 
6327 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6328 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6329 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6330 
6331 	/* If multipath policy is updated while an I/O channel is active,
6332 	 * the update should be applied to the I/O channel immediately.
6333 	 */
6334 	done = -1;
6335 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6336 				       BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX,
6337 				       ut_set_multipath_policy_done, &done);
6338 	poll_threads();
6339 	CU_ASSERT(done == 0);
6340 
6341 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6342 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6343 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6344 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6345 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6346 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6347 
6348 	spdk_put_io_channel(ch);
6349 
6350 	poll_threads();
6351 
6352 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6353 	CU_ASSERT(rc == 0);
6354 
6355 	poll_threads();
6356 	spdk_delay_us(1000);
6357 	poll_threads();
6358 
6359 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6360 }
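
/* Policy updates are applied both to the bdev itself and to every active I/O
 * channel. A sketch assuming the usual spdk_for_each_channel() pattern (not
 * verified against bdev_nvme.c):
 *
 *	bdev->mp_policy = policy;
 *	spdk_for_each_channel(bdev,
 *			      for each channel: nbdev_ch->mp_policy = policy,
 *			      then invoke the user callback on completion);
 */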
6361 
6362 static void
6363 test_uuid_generation(void)
6364 {
6365 	uint32_t nsid1 = 1, nsid2 = 2;
6366 	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
6367 	char sn3[21] = "                    ";
6368 	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
6369 	struct spdk_uuid uuid1, uuid2;
6370 
6371 	/* Test case 1:
6372 	 * Serial numbers are the same, nsids are different.
6373 	 * Compare the two generated UUIDs - they should be different. */
6374 	uuid1 = nvme_generate_uuid(sn1, nsid1);
6375 	uuid2 = nvme_generate_uuid(sn1, nsid2);
6376 
6377 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6378 
6379 	/* Test case 2:
6380 	 * Serial numbers differ only by one character, nsids are the same.
6381 	 * Compare the two generated UUIDs - they should be different. */
6382 	uuid1 = nvme_generate_uuid(sn1, nsid1);
6383 	uuid2 = nvme_generate_uuid(sn2, nsid1);
6384 
6385 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6386 
6387 	/* Test case 3:
6388 	 * Serial number consists only of space characters.
6389 	 * Validate the generated UUID. */
6390 	uuid1 = nvme_generate_uuid(sn3, nsid1);
6391 	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
6392 }
6393 
6394 static void
6395 test_retry_io_to_same_path(void)
6396 {
6397 	struct nvme_path_id path1 = {}, path2 = {};
6398 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6399 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6400 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
6401 	const int STRING_SIZE = 32;
6402 	const char *attached_names[STRING_SIZE];
6403 	struct nvme_bdev *bdev;
6404 	struct spdk_bdev_io *bdev_io;
6405 	struct nvme_bdev_io *bio;
6406 	struct spdk_io_channel *ch;
6407 	struct nvme_bdev_channel *nbdev_ch;
6408 	struct nvme_io_path *io_path1, *io_path2;
6409 	struct ut_nvme_req *req;
6410 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6411 	int done;
6412 	int rc;
6413 
6414 	g_opts.nvme_ioq_poll_period_us = 1;
6415 
6416 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6417 	ut_init_trid(&path1.trid);
6418 	ut_init_trid2(&path2.trid);
6419 	g_ut_attach_ctrlr_status = 0;
6420 	g_ut_attach_bdev_count = 1;
6421 
6422 	set_thread(0);
6423 
6424 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6425 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6426 
6427 	ctrlr1->ns[0].uuid = &uuid1;
6428 
6429 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6430 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6431 	CU_ASSERT(rc == 0);
6432 
6433 	spdk_delay_us(1000);
6434 	poll_threads();
6435 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6436 	poll_threads();
6437 
6438 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6439 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6440 
6441 	ctrlr2->ns[0].uuid = &uuid1;
6442 
6443 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6444 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6445 	CU_ASSERT(rc == 0);
6446 
6447 	spdk_delay_us(1000);
6448 	poll_threads();
6449 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6450 	poll_threads();
6451 
6452 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6453 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6454 
6455 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
6456 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6457 
6458 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
6459 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6460 
6461 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6462 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6463 
6464 	done = -1;
6465 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6466 				       BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
6467 	poll_threads();
6468 	CU_ASSERT(done == 0);
6469 
6470 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6471 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6472 	CU_ASSERT(bdev->rr_min_io == 1);
6473 
6474 	ch = spdk_get_io_channel(bdev);
6475 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6476 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6477 
6478 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6479 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6480 	CU_ASSERT(nbdev_ch->rr_min_io == 1);
6481 
6482 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6483 	ut_bdev_io_set_buf(bdev_io);
6484 
6485 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
6486 
6487 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
6488 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
6489 
6490 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
6491 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
6492 
6493 	/* The 1st I/O should be submitted to io_path1. */
6494 	bdev_io->internal.in_submit_request = true;
6495 
6496 	bdev_nvme_submit_request(ch, bdev_io);
6497 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6498 	CU_ASSERT(bio->io_path == io_path1);
6499 	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);
6500 
6501 	spdk_delay_us(1);
6502 
6503 	poll_threads();
6504 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
6505 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6506 
6507 	/* The 2nd I/O should be submitted to io_path2 because the path selection
6508 	 * policy is round-robin.
6509 	 */
6510 	bdev_io->internal.in_submit_request = true;
6511 
6512 	bdev_nvme_submit_request(ch, bdev_io);
6513 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6514 	CU_ASSERT(bio->io_path == io_path2);
6515 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6516 
6517 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6518 	SPDK_CU_ASSERT_FATAL(req != NULL);
6519 
6520 	/* Set retry count to non-zero. */
6521 	g_opts.bdev_retry_count = 2;
6522 
6523 	/* Inject an I/O error. */
6524 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6525 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6526 
6527 	/* The 2nd I/O should be queued to nbdev_ch. */
6528 	spdk_delay_us(1);
6529 	poll_thread_times(0, 1);
6530 
6531 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6532 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6533 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
6534 
6535 	/* The 2nd I/O should keep caching io_path2. */
6536 	CU_ASSERT(bio->io_path == io_path2);
6537 
6538 	/* The 2nd I/O should be submitted to io_path2 again. */
6539 	poll_thread_times(0, 1);
6540 
6541 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6542 	CU_ASSERT(bio->io_path == io_path2);
6543 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6544 
6545 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6546 	SPDK_CU_ASSERT_FATAL(req != NULL);
6547 
6548 	/* Inject an I/O error again. */
6549 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6550 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6551 	req->cpl.status.crd = 1;
6552 
6553 	ctrlr2->cdata.crdt[1] = 1;
6554 
6555 	/* The 2nd I/O should be queued to nbdev_ch. */
6556 	spdk_delay_us(1);
6557 	poll_thread_times(0, 1);
6558 
6559 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6560 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6561 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
6562 
6563 	/* The 2nd I/O should keep caching io_path2. */
6564 	CU_ASSERT(bio->io_path == io_path2);
6565 
6566 	/* Detach ctrlr2 dynamically. */
6567 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
6568 	CU_ASSERT(rc == 0);
6569 
6570 	spdk_delay_us(1000);
6571 	poll_threads();
6572 	spdk_delay_us(1000);
6573 	poll_threads();
6574 	spdk_delay_us(1000);
6575 	poll_threads();
6576 	spdk_delay_us(1000);
6577 	poll_threads();
6578 
6579 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
6580 
6581 	poll_threads();
6582 	spdk_delay_us(100000);
6583 	poll_threads();
6584 	spdk_delay_us(1);
6585 	poll_threads();
6586 
6587 	/* The 2nd I/O should succeed via io_path1. */
6588 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
6589 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6590 	CU_ASSERT(bio->io_path == io_path1);
6591 
6592 	free(bdev_io);
6593 
6594 	spdk_put_io_channel(ch);
6595 
6596 	poll_threads();
6597 	spdk_delay_us(1);
6598 	poll_threads();
6599 
6600 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6601 	CU_ASSERT(rc == 0);
6602 
6603 	poll_threads();
6604 	spdk_delay_us(1000);
6605 	poll_threads();
6606 
6607 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
6608 
6609 	g_opts.nvme_ioq_poll_period_us = 0;
6610 	g_opts.bdev_retry_count = 0;
6611 }
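
/* The delays used above follow the NVMe Command Retry Delay mechanism: when a
 * completion carries a non-zero CRD field, the host should wait
 * cdata.crdt[crd] * 100 milliseconds before retrying. With crd = 1 and
 * crdt[1] = 1, the test advances the clock by 100000 us (100 ms) before the
 * retry is attempted.
 */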
6612 
6613 /* This case verifies a fix for a complex race condition where failover is
6614  * lost if the fabric connect command times out while the controller is
6615  * being reset.
6616  */
6617 static void
6618 test_race_between_reset_and_disconnected(void)
6619 {
6620 	struct spdk_nvme_transport_id trid = {};
6621 	struct spdk_nvme_ctrlr ctrlr = {};
6622 	struct nvme_ctrlr *nvme_ctrlr = NULL;
6623 	struct nvme_path_id *curr_trid;
6624 	struct spdk_io_channel *ch1, *ch2;
6625 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
6626 	int rc;
6627 
6628 	ut_init_trid(&trid);
6629 	TAILQ_INIT(&ctrlr.active_io_qpairs);
6630 
6631 	set_thread(0);
6632 
6633 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
6634 	CU_ASSERT(rc == 0);
6635 
6636 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
6637 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
6638 
6639 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
6640 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
6641 
6642 	ch1 = spdk_get_io_channel(nvme_ctrlr);
6643 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
6644 
6645 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
6646 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6647 
6648 	set_thread(1);
6649 
6650 	ch2 = spdk_get_io_channel(nvme_ctrlr);
6651 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
6652 
6653 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
6654 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6655 
6656 	/* Reset starts from thread 1. */
6657 	set_thread(1);
6658 
6659 	nvme_ctrlr->resetting = false;
6660 	curr_trid->last_failed_tsc = spdk_get_ticks();
6661 	ctrlr.is_failed = true;
6662 
6663 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
6664 	CU_ASSERT(rc == 0);
6665 	CU_ASSERT(nvme_ctrlr->resetting == true);
6666 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6667 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6668 
6669 	poll_thread_times(0, 3);
6670 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
6671 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6672 
6673 	poll_thread_times(0, 1);
6674 	poll_thread_times(1, 1);
6675 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
6676 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
6677 	CU_ASSERT(ctrlr.is_failed == true);
6678 
6679 	poll_thread_times(1, 1);
6680 	poll_thread_times(0, 1);
6681 	CU_ASSERT(ctrlr.is_failed == false);
6682 	CU_ASSERT(ctrlr.adminq.is_connected == false);
6683 
6684 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6685 	poll_thread_times(0, 2);
6686 	CU_ASSERT(ctrlr.adminq.is_connected == true);
6687 
6688 	poll_thread_times(0, 1);
6689 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6690 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
6691 
6692 	poll_thread_times(1, 1);
6693 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6694 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6695 	CU_ASSERT(nvme_ctrlr->resetting == true);
6696 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
6697 
6698 	poll_thread_times(0, 2);
6699 	CU_ASSERT(nvme_ctrlr->resetting == true);
6700 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6701 	poll_thread_times(1, 1);
6702 	CU_ASSERT(nvme_ctrlr->resetting == true);
6703 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6704 
6705 	/* There is just one poll left before _bdev_nvme_reset_complete() is executed.
6706 	 *
6707 	 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric
6708 	 * connect command is executed. If the fabric connect command times out,
6709 	 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until
6710 	 * _bdev_nvme_reset_complete() sets nvme_ctrlr->resetting to false.
6711 	 *
6712 	 * Simulate fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
6713 	 */
6714 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
6715 	CU_ASSERT(rc == -EINPROGRESS);
6716 	CU_ASSERT(nvme_ctrlr->resetting == true);
6717 	CU_ASSERT(nvme_ctrlr->pending_failover == true);
6718 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6719 
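	/* One more poll runs _bdev_nvme_reset_complete(), which consumes
	 * pending_failover and kicks off the deferred failover, as the
	 * assertions below verify.
	 */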
6720 	poll_thread_times(0, 1);
6721 
6722 	CU_ASSERT(nvme_ctrlr->resetting == true);
6723 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6724 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
6725 
6726 	poll_threads();
6727 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6728 	poll_threads();
6729 
6730 	CU_ASSERT(nvme_ctrlr->resetting == false);
6731 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6732 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6733 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6734 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6735 
6736 	spdk_put_io_channel(ch2);
6737 
6738 	set_thread(0);
6739 
6740 	spdk_put_io_channel(ch1);
6741 
6742 	poll_threads();
6743 
6744 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6745 	CU_ASSERT(rc == 0);
6746 
6747 	poll_threads();
6748 	spdk_delay_us(1000);
6749 	poll_threads();
6750 
6751 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6752 }
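
/* Completion callback shared by the ctrlr-op tests below; it stores the
 * operation's result so the test body can assert on it after polling.
 */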
6753 static void
6754 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
6755 {
6756 	int *_rc = (int *)cb_arg;
6757 
6758 	SPDK_CU_ASSERT_FATAL(_rc != NULL);
6759 	*_rc = rc;
6760 }
6761 
6762 static void
6763 test_ctrlr_op_rpc(void)
6764 {
6765 	struct spdk_nvme_transport_id trid = {};
6766 	struct spdk_nvme_ctrlr ctrlr = {};
6767 	struct nvme_ctrlr *nvme_ctrlr = NULL;
6768 	struct nvme_path_id *curr_trid;
6769 	struct spdk_io_channel *ch1, *ch2;
6770 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
6771 	int ctrlr_op_rc;
6772 	int rc;
6773 
6774 	ut_init_trid(&trid);
6775 	TAILQ_INIT(&ctrlr.active_io_qpairs);
6776 
6777 	set_thread(0);
6778 
6779 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
6780 	CU_ASSERT(rc == 0);
6781 
6782 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
6783 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
6784 
6785 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
6786 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
6787 
6788 	ch1 = spdk_get_io_channel(nvme_ctrlr);
6789 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
6790 
6791 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
6792 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6793 
6794 	set_thread(1);
6795 
6796 	ch2 = spdk_get_io_channel(nvme_ctrlr);
6797 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
6798 
6799 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
6800 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6801 
6802 	/* Reset starts from thread 1. */
6803 	set_thread(1);
6804 
6805 	/* Case 1: ctrlr is already being destructed. */
6806 	nvme_ctrlr->destruct = true;
6807 	ctrlr_op_rc = 0;
6808 
6809 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
6810 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6811 
6812 	poll_threads();
6813 
6814 	CU_ASSERT(ctrlr_op_rc == -ENXIO);
6815 
6816 	/* Case 2: reset is in progress. */
6817 	nvme_ctrlr->destruct = false;
6818 	nvme_ctrlr->resetting = true;
6819 	ctrlr_op_rc = 0;
6820 
6821 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
6822 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6823 
6824 	poll_threads();
6825 
6826 	CU_ASSERT(ctrlr_op_rc == -EBUSY);
6827 
6828 	/* Case 3: reset completes successfully. */
6829 	nvme_ctrlr->resetting = false;
6830 	curr_trid->last_failed_tsc = spdk_get_ticks();
6831 	ctrlr.is_failed = true;
6832 	ctrlr_op_rc = -1;
6833 
6834 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
6835 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6836 
6837 	CU_ASSERT(nvme_ctrlr->resetting == true);
6838 	CU_ASSERT(ctrlr_op_rc == -1);
6839 
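	/* Poll everything to completion; the delay lets the admin-queue poller
	 * re-connect the admin queue so the reset can finish and invoke the
	 * RPC callback.
	 */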
6840 	poll_threads();
6841 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6842 	poll_threads();
6843 
6844 	CU_ASSERT(nvme_ctrlr->resetting == false);
6845 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6846 	CU_ASSERT(ctrlr.is_failed == false);
6847 	CU_ASSERT(ctrlr_op_rc == 0);
6848 
6849 	/* Case 4: invalid operation. */
6850 	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
6851 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6852 
6853 	poll_threads();
6854 
6855 	CU_ASSERT(ctrlr_op_rc == -EINVAL);
6856 
6857 	spdk_put_io_channel(ch2);
6858 
6859 	set_thread(0);
6860 
6861 	spdk_put_io_channel(ch1);
6862 
6863 	poll_threads();
6864 
6865 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6866 	CU_ASSERT(rc == 0);
6867 
6868 	poll_threads();
6869 	spdk_delay_us(1000);
6870 	poll_threads();
6871 
6872 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6873 }
6874 
6875 static void
6876 test_bdev_ctrlr_op_rpc(void)
6877 {
6878 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
6879 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
6880 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6881 	struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL;
6882 	struct nvme_path_id *curr_trid1, *curr_trid2;
6883 	struct spdk_io_channel *ch11, *ch12, *ch21, *ch22;
6884 	struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22;
6885 	int ctrlr_op_rc;
6886 	int rc;
6887 
6888 	ut_init_trid(&trid1);
6889 	ut_init_trid2(&trid2);
6890 	TAILQ_INIT(&ctrlr1.active_io_qpairs);
6891 	TAILQ_INIT(&ctrlr2.active_io_qpairs);
6892 	ctrlr1.cdata.cmic.multi_ctrlr = 1;
6893 	ctrlr2.cdata.cmic.multi_ctrlr = 1;
6894 	ctrlr1.cdata.cntlid = 1;
6895 	ctrlr2.cdata.cntlid = 2;
6896 	ctrlr1.adminq.is_connected = true;
6897 	ctrlr2.adminq.is_connected = true;
6898 
6899 	set_thread(0);
6900 
6901 	rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL);
6902 	CU_ASSERT(rc == 0);
6903 
6904 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6905 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6906 
6907 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1);
6908 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6909 
6910 	curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
6911 	SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL);
6912 
6913 	ch11 = spdk_get_io_channel(nvme_ctrlr1);
6914 	SPDK_CU_ASSERT_FATAL(ch11 != NULL);
6915 
6916 	ctrlr_ch11 = spdk_io_channel_get_ctx(ch11);
6917 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
6918 
6919 	set_thread(1);
6920 
6921 	ch12 = spdk_get_io_channel(nvme_ctrlr1);
6922 	SPDK_CU_ASSERT_FATAL(ch12 != NULL);
6923 
6924 	ctrlr_ch12 = spdk_io_channel_get_ctx(ch12);
6925 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
6926 
6927 	set_thread(0);
6928 
6929 	rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL);
6930 	CU_ASSERT(rc == 0);
6931 
6932 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2);
6933 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6934 
6935 	curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
6936 	SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL);
6937 
6938 	ch21 = spdk_get_io_channel(nvme_ctrlr2);
6939 	SPDK_CU_ASSERT_FATAL(ch21 != NULL);
6940 
6941 	ctrlr_ch21 = spdk_io_channel_get_ctx(ch21);
6942 	CU_ASSERT(ctrlr_ch21->qpair != NULL);
6943 
6944 	set_thread(1);
6945 
6946 	ch22 = spdk_get_io_channel(nvme_ctrlr2);
6947 	SPDK_CU_ASSERT_FATAL(ch22 != NULL);
6948 
6949 	ctrlr_ch22 = spdk_io_channel_get_ctx(ch22);
6950 	CU_ASSERT(ctrlr_ch22->qpair != NULL);
6951 
6952 	/* Reset starts from thread 1. */
6953 	set_thread(1);
6954 
6955 	nvme_ctrlr1->resetting = false;
6956 	nvme_ctrlr2->resetting = false;
6957 	curr_trid1->last_failed_tsc = spdk_get_ticks();
6958 	curr_trid2->last_failed_tsc = spdk_get_ticks();
6959 	ctrlr_op_rc = -1;
6960 
6961 	nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET,
6962 			       ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6963 
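	/* The operation is applied to each nvme_ctrlr under the bdev ctrlr in
	 * sequence: ctrlr1 starts resetting immediately while ctrlr2 is not
	 * touched yet.
	 */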
6964 	CU_ASSERT(nvme_ctrlr1->resetting == true);
6965 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
6966 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
6967 	CU_ASSERT(nvme_ctrlr2->resetting == false);
6968 
6969 	poll_thread_times(0, 3);
6970 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
6971 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
6972 
6973 	poll_thread_times(0, 1);
6974 	poll_thread_times(1, 1);
6975 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
6976 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
6977 
6978 	poll_thread_times(1, 1);
6979 	poll_thread_times(0, 1);
6980 	CU_ASSERT(ctrlr1.adminq.is_connected == false);
6981 
6982 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6983 	poll_thread_times(0, 2);
6984 	CU_ASSERT(ctrlr1.adminq.is_connected == true);
6985 
6986 	poll_thread_times(0, 1);
6987 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
6988 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
6989 
6990 	poll_thread_times(1, 1);
6991 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
6992 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
6993 	CU_ASSERT(nvme_ctrlr1->resetting == true);
6994 	CU_ASSERT(curr_trid1->last_failed_tsc != 0);
6995 
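	/* Complete ctrlr1's reset; its completion hands the operation off to
	 * ctrlr2, whose reset then begins, as the assertions below verify.
	 */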
6996 	poll_thread_times(0, 2);
6997 	poll_thread_times(1, 1);
6998 	poll_thread_times(0, 1);
6999 	poll_thread_times(1, 1);
7000 	poll_thread_times(0, 1);
7001 	poll_thread_times(1, 1);
7002 	poll_thread_times(0, 1);
7003 
7004 	CU_ASSERT(nvme_ctrlr1->resetting == false);
7005 	CU_ASSERT(curr_trid1->last_failed_tsc == 0);
7006 	CU_ASSERT(nvme_ctrlr2->resetting == true);
7007 
7008 	poll_threads();
7009 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7010 	poll_threads();
7011 
7012 	CU_ASSERT(nvme_ctrlr2->resetting == false);
7013 	CU_ASSERT(ctrlr_op_rc == 0);
7014 
7015 	set_thread(1);
7016 
7017 	spdk_put_io_channel(ch12);
7018 	spdk_put_io_channel(ch22);
7019 
7020 	set_thread(0);
7021 
7022 	spdk_put_io_channel(ch11);
7023 	spdk_put_io_channel(ch21);
7024 
7025 	poll_threads();
7026 
7027 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7028 	CU_ASSERT(rc == 0);
7029 
7030 	poll_threads();
7031 	spdk_delay_us(1000);
7032 	poll_threads();
7033 
7034 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7035 }
7036 
7037 static void
7038 test_disable_enable_ctrlr(void)
7039 {
7040 	struct spdk_nvme_transport_id trid = {};
7041 	struct spdk_nvme_ctrlr ctrlr = {};
7042 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7043 	struct nvme_path_id *curr_trid;
7044 	struct spdk_io_channel *ch1, *ch2;
7045 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7046 	int rc;
7047 
7048 	ut_init_trid(&trid);
7049 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7050 	ctrlr.adminq.is_connected = true;
7051 
7052 	set_thread(0);
7053 
7054 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7055 	CU_ASSERT(rc == 0);
7056 
7057 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7058 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7059 
7060 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7061 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7062 
7063 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7064 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7065 
7066 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7067 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7068 
7069 	set_thread(1);
7070 
7071 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7072 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7073 
7074 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7075 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7076 
7077 	/* Disable starts from thread 1. */
7078 	set_thread(1);
7079 
7080 	/* Case 1: ctrlr is already disabled. */
7081 	nvme_ctrlr->disabled = true;
7082 
7083 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7084 	CU_ASSERT(rc == -EALREADY);
7085 
7086 	/* Case 2: ctrlr is already being destructed. */
7087 	nvme_ctrlr->disabled = false;
7088 	nvme_ctrlr->destruct = true;
7089 
7090 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7091 	CU_ASSERT(rc == -ENXIO);
7092 
7093 	/* Case 3: reset is in progress. */
7094 	nvme_ctrlr->destruct = false;
7095 	nvme_ctrlr->resetting = true;
7096 
7097 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7098 	CU_ASSERT(rc == -EBUSY);
7099 
7100 	/* Case 4: disable completes successfully. */
7101 	nvme_ctrlr->resetting = false;
7102 
7103 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7104 	CU_ASSERT(rc == 0);
7105 	CU_ASSERT(nvme_ctrlr->resetting == true);
7106 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7107 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7108 
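	/* Disable tears down like a reset: qpairs are deleted channel by
	 * channel and the admin queue is disconnected, but no reconnect
	 * follows; the ctrlr settles with resetting == false and
	 * disabled == true.
	 */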
7109 	poll_thread_times(0, 3);
7110 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7111 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7112 
7113 	poll_thread_times(0, 1);
7114 	poll_thread_times(1, 1);
7115 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7116 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7117 
7118 	poll_thread_times(1, 1);
7119 	poll_thread_times(0, 1);
7120 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7121 	poll_thread_times(1, 1);
7122 	poll_thread_times(0, 1);
7123 	poll_thread_times(1, 1);
7124 	poll_thread_times(0, 1);
7125 	CU_ASSERT(nvme_ctrlr->resetting == false);
7126 	CU_ASSERT(nvme_ctrlr->disabled == true);
7127 
7128 	/* Case 5: enable completes successfully. */
7129 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7130 	CU_ASSERT(rc == 0);
7131 
7132 	CU_ASSERT(nvme_ctrlr->resetting == true);
7133 	CU_ASSERT(nvme_ctrlr->disabled == false);
7134 
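	/* Enable reverses the teardown: the admin queue is re-connected first,
	 * then each channel's qpair is recreated.
	 */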
7135 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7136 	poll_thread_times(0, 2);
7137 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7138 
7139 	poll_thread_times(0, 1);
7140 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7141 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7142 
7143 	poll_thread_times(1, 1);
7144 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7145 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7146 	CU_ASSERT(nvme_ctrlr->resetting == true);
7147 
7148 	poll_thread_times(0, 2);
7149 	CU_ASSERT(nvme_ctrlr->resetting == true);
7150 	poll_thread_times(1, 1);
7151 	CU_ASSERT(nvme_ctrlr->resetting == true);
7152 	poll_thread_times(0, 1);
7153 	CU_ASSERT(nvme_ctrlr->resetting == false);
7154 
7155 	/* Case 6: ctrlr is already enabled. */
7156 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7157 	CU_ASSERT(rc == -EALREADY);
7158 
7159 	set_thread(0);
7160 
7161 	/* Case 7: disable cancels delayed reconnect. */
7162 	nvme_ctrlr->opts.reconnect_delay_sec = 10;
7163 	ctrlr.fail_reset = true;
7164 
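	/* With fail_reset set, the reset attempt fails, and since
	 * reconnect_delay_sec is non-zero a delayed reconnect timer is armed
	 * instead of retrying immediately.
	 */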
7165 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7166 	CU_ASSERT(rc == 0);
7167 
7168 	poll_threads();
7169 
7170 	CU_ASSERT(nvme_ctrlr->resetting == false);
7171 	CU_ASSERT(ctrlr.is_failed == false);
7172 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7173 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7174 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
7175 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
7176 
7177 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7178 	CU_ASSERT(rc == 0);
7179 
7180 	CU_ASSERT(nvme_ctrlr->resetting == true);
7181 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
7182 
7183 	poll_threads();
7184 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7185 	poll_threads();
7186 
7187 	CU_ASSERT(nvme_ctrlr->resetting == false);
7188 	CU_ASSERT(nvme_ctrlr->disabled == true);
7189 
7190 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7191 	CU_ASSERT(rc == 0);
7192 
7193 	CU_ASSERT(nvme_ctrlr->resetting == true);
7194 	CU_ASSERT(nvme_ctrlr->disabled == false);
7195 
7196 	poll_threads();
7197 
7198 	CU_ASSERT(nvme_ctrlr->resetting == false);
7199 
7200 	set_thread(1);
7201 
7202 	spdk_put_io_channel(ch2);
7203 
7204 	set_thread(0);
7205 
7206 	spdk_put_io_channel(ch1);
7207 
7208 	poll_threads();
7209 
7210 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7211 	CU_ASSERT(rc == 0);
7212 
7213 	poll_threads();
7214 	spdk_delay_us(1000);
7215 	poll_threads();
7216 
7217 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7218 }
7219 
7220 static void
7221 ut_delete_done(void *ctx, int rc)
7222 {
7223 	int *delete_done_rc = ctx;
7224 	*delete_done_rc = rc;
7225 }
7226 
7227 static void
7228 test_delete_ctrlr_done(void)
7229 {
7230 	struct spdk_nvme_transport_id trid = {};
7231 	struct spdk_nvme_ctrlr ctrlr = {};
7232 	int delete_done_rc = 0xDEADBEEF;
7233 	int rc;
7234 
7235 	ut_init_trid(&trid);
7236 
7237 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);
7238 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
7239 
7240 	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
7241 	CU_ASSERT(rc == 0);
7242 
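	/* Poll until the asynchronous delete completes and the callback
	 * replaces the 0xDEADBEEF sentinel; the iteration cap keeps a stuck
	 * delete from hanging the test.
	 */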
7243 	for (int i = 0; i < 20; i++) {
7244 		poll_threads();
7245 		if (delete_done_rc == 0) {
7246 			break;
7247 		}
7248 		spdk_delay_us(1000);
7249 	}
7250 
7251 	CU_ASSERT(delete_done_rc == 0);
7252 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7253 }
7254 
7255 int
7256 main(int argc, char **argv)
7257 {
7258 	CU_pSuite	suite = NULL;
7259 	unsigned int	num_failures;
7260 
7261 	CU_initialize_registry();
7262 
7263 	suite = CU_add_suite("nvme", NULL, NULL);
7264 
7265 	CU_ADD_TEST(suite, test_create_ctrlr);
7266 	CU_ADD_TEST(suite, test_reset_ctrlr);
7267 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
7268 	CU_ADD_TEST(suite, test_failover_ctrlr);
7269 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
7270 	CU_ADD_TEST(suite, test_pending_reset);
7271 	CU_ADD_TEST(suite, test_attach_ctrlr);
7272 	CU_ADD_TEST(suite, test_aer_cb);
7273 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
7274 	CU_ADD_TEST(suite, test_add_remove_trid);
7275 	CU_ADD_TEST(suite, test_abort);
7276 	CU_ADD_TEST(suite, test_get_io_qpair);
7277 	CU_ADD_TEST(suite, test_bdev_unregister);
7278 	CU_ADD_TEST(suite, test_compare_ns);
7279 	CU_ADD_TEST(suite, test_init_ana_log_page);
7280 	CU_ADD_TEST(suite, test_get_memory_domains);
7281 	CU_ADD_TEST(suite, test_reconnect_qpair);
7282 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
7283 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
7284 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
7285 	CU_ADD_TEST(suite, test_admin_path);
7286 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
7287 	CU_ADD_TEST(suite, test_find_io_path);
7288 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
7289 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
7290 	CU_ADD_TEST(suite, test_retry_io_count);
7291 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
7292 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
7293 	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
7294 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
7295 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
7296 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
7297 	CU_ADD_TEST(suite, test_fail_path);
7298 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
7299 	CU_ADD_TEST(suite, test_ana_transition);
7300 	CU_ADD_TEST(suite, test_set_preferred_path);
7301 	CU_ADD_TEST(suite, test_find_next_io_path);
7302 	CU_ADD_TEST(suite, test_find_io_path_min_qd);
7303 	CU_ADD_TEST(suite, test_disable_auto_failback);
7304 	CU_ADD_TEST(suite, test_set_multipath_policy);
7305 	CU_ADD_TEST(suite, test_uuid_generation);
7306 	CU_ADD_TEST(suite, test_retry_io_to_same_path);
7307 	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
7308 	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
7309 	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
7310 	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
7311 	CU_ADD_TEST(suite, test_delete_ctrlr_done);
7312 
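	/* The tests exercise multi-thread behavior; three SPDK threads are
	 * allocated up front and each test selects among them via set_thread().
	 */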
7313 	allocate_threads(3);
7314 	set_thread(0);
7315 	bdev_nvme_library_init();
7316 	init_accel();
7317 
7318 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
7319 
7320 	set_thread(0);
7321 	bdev_nvme_library_fini();
7322 	fini_accel();
7323 	free_threads();
7324 
7325 	CU_cleanup_registry();
7326 
7327 	return num_failures;
7328 }
7329