/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

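/* Dummy io_device pointer; the spdk_accel_get_io_channel() stub below returns
 * the I/O channel registered for it.
 */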
static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));

int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));

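/* Minimal fake definitions of the opaque NVMe driver structures. The tests only
 * exercise the fields that bdev_nvme.c touches, so the real driver layouts are
 * replaced with just enough state to track requests, qpairs, and namespaces.
 */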
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint8_t				failure_reason;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	bool				is_removed;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

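/* Create a fake controller for the given trid with num_ns namespaces and queue
 * it on g_ut_init_ctrlrs, where spdk_nvme_probe_poll_async() will find it.
 * Returns NULL if a controller with the same trid is already queued.
 */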
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

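/* Queue a fake request on the qpair with a default successful completion.
 * Requests stay outstanding until spdk_nvme_qpair_process_completions()
 * invokes their callbacks.
 */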
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

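/* Complete attachment of a fake controller: apply the caller's opts, move the
 * controller to g_ut_attached_ctrlrs, and invoke the probe attach callback.
 */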
static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

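/* Mirror the real driver's behavior: deleting a qpair from within its own
 * completion context is deferred until the completion loop finishes.
 */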
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
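/* Build a fake ANA log page: a header followed by one descriptor per active
 * namespace, where each descriptor holds a single NSID equal to its group ID.
 */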
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

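/* Abort the outstanding request whose cb_arg matches cmd_cb_arg by rewriting
 * its completion status, then queue the ABORT command itself on the admin qpair.
 */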
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

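/* Drain disconnected qpairs via the callback first, then process completions on
 * connected qpairs; a qpair with a failure reason is disconnected instead.
 */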
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

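/* Verify that a ctrlr can be created and looked up by name, and that deletion
 * is asynchronous: the ctrlr stays registered until the threads are polled.
 */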
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

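/* Exercise bdev_nvme_reset_ctrlr() with I/O channels on two threads: reset is
 * rejected while the ctrlr is destructing (-ENXIO) or already resetting
 * (-EBUSY), a full reset disconnects and reconnects both channels' qpairs step
 * by step, and resetting a removed ctrlr surfaces the hotplug callback.
 */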
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed but ctrlr is still not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

1618 
1619 static void
1620 test_failover_ctrlr(void)
1621 {
1622 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
1623 	struct spdk_nvme_ctrlr ctrlr = {};
1624 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1625 	struct nvme_path_id *curr_trid, *next_trid;
1626 	struct spdk_io_channel *ch1, *ch2;
1627 	int rc;
1628 
1629 	ut_init_trid(&trid1);
1630 	ut_init_trid2(&trid2);
1631 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1632 
1633 	set_thread(0);
1634 
1635 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1636 	CU_ASSERT(rc == 0);
1637 
1638 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1639 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1640 
1641 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1642 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1643 
1644 	set_thread(1);
1645 
1646 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1647 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1648 
1649 	/* First, test one trid case. */
1650 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1651 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1652 
1653 	/* Failover starts from thread 1. */
1654 	set_thread(1);
1655 
1656 	/* Case 1: ctrlr is already being destructed. */
1657 	nvme_ctrlr->destruct = true;
1658 
1659 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
1660 	CU_ASSERT(rc == -ENXIO);
1661 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
1662 
1663 	/* Case 2: reset is in progress. */
1664 	nvme_ctrlr->destruct = false;
1665 	nvme_ctrlr->resetting = true;
1666 
1667 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
1668 	CU_ASSERT(rc == -EINPROGRESS);
1669 
1670 	/* Case 3: reset completes successfully. */
1671 	nvme_ctrlr->resetting = false;
1672 
1673 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
1674 	CU_ASSERT(rc == 0);
1675 
1676 	CU_ASSERT(nvme_ctrlr->resetting == true);
1677 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
1678 
1679 	poll_threads();
1680 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1681 	poll_threads();
1682 
1683 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1684 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1685 
1686 	CU_ASSERT(nvme_ctrlr->resetting == false);
1687 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
1688 
1689 	set_thread(0);
1690 
1691 	/* Second, test two trids case. */
1692 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1693 	CU_ASSERT(rc == 0);
1694 
1695 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1696 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1697 	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
1698 	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);
1699 
1700 	/* Failover starts from thread 1. */
1701 	set_thread(1);
1702 
1703 	/* Case 4: reset is in progress. */
1704 	nvme_ctrlr->resetting = true;
1705 
1706 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
1707 	CU_ASSERT(rc == -EINPROGRESS);
1708 
1709 	/* Case 5: failover completes successfully. */
1710 	nvme_ctrlr->resetting = false;
1711 
1712 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
1713 	CU_ASSERT(rc == 0);
1714 
1715 	CU_ASSERT(nvme_ctrlr->resetting == true);
1716 
1717 	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1718 	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
1719 	CU_ASSERT(next_trid != curr_trid);
1720 	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
1721 	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);
1722 
1723 	poll_threads();
1724 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1725 	poll_threads();
1726 
1727 	CU_ASSERT(nvme_ctrlr->resetting == false);
1728 
1729 	spdk_put_io_channel(ch2);
1730 
1731 	set_thread(0);
1732 
1733 	spdk_put_io_channel(ch1);
1734 
1735 	poll_threads();
1736 
1737 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1738 	CU_ASSERT(rc == 0);
1739 
1740 	poll_threads();
1741 	spdk_delay_us(1000);
1742 	poll_threads();
1743 
1744 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1745 }

/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was
 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1
 * to trid2. While processing the failed reset, trid3 was added. trid1 should
 * have been active, i.e., the head of the list until the failover completed.
 * However trid3 was inserted to the head of the list by mistake.
 *
 * I/O qpairs have a smaller polling period than the admin qpair. When a connection is
 * broken, an I/O qpair may detect the error earlier than the admin qpair. I/O qpair error
 * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr
 * may be executed repeatedly before failover is executed. Hence this bug is real.
 *
 * The following test verifies the fix.
 */
static void
test_race_between_failover_and_add_secondary_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *path_id1, *path_id2, *path_id3;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	path_id2 = TAILQ_NEXT(path_id1, link);
	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);

	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(path_id1->last_failed_tsc != 0);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
	path_id3 = TAILQ_NEXT(path_id2, link);
	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

1855 static void
1856 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1857 {
1858 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1859 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1860 }
1861 
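/* Reset requests that are submitted while another reset is in progress are queued
 * on the nvme_ctrlr_channel and executed after the first reset completes. Verify
 * that a queued reset succeeds if the first reset succeeds, and fails if the
 * first reset fails.
 */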
1862 static void
1863 test_pending_reset(void)
1864 {
1865 	struct spdk_nvme_transport_id trid = {};
1866 	struct spdk_nvme_ctrlr *ctrlr;
1867 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1868 	const int STRING_SIZE = 32;
1869 	const char *attached_names[STRING_SIZE];
1870 	struct nvme_bdev *bdev;
1871 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1872 	struct spdk_io_channel *ch1, *ch2;
1873 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1874 	struct nvme_io_path *io_path1, *io_path2;
1875 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1876 	int rc;
1877 
1878 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1879 	ut_init_trid(&trid);
1880 
1881 	set_thread(0);
1882 
1883 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1884 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1885 
1886 	g_ut_attach_ctrlr_status = 0;
1887 	g_ut_attach_bdev_count = 1;
1888 
1889 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1890 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1891 	CU_ASSERT(rc == 0);
1892 
1893 	spdk_delay_us(1000);
1894 	poll_threads();
1895 
1896 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1897 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1898 
1899 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1900 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1901 
1902 	ch1 = spdk_get_io_channel(bdev);
1903 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1904 
1905 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1906 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1907 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1908 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1909 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1910 
1911 	set_thread(1);
1912 
1913 	ch2 = spdk_get_io_channel(bdev);
1914 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1915 
1916 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1917 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1918 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1919 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1920 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1921 
1922 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1923 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1924 
1925 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1926 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1927 
	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while the first request is being processed.
	 */
1931 	bdev_nvme_submit_request(ch2, first_bdev_io);
1932 	CU_ASSERT(nvme_ctrlr->resetting == true);
1933 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1934 
1935 	set_thread(0);
1936 
1937 	bdev_nvme_submit_request(ch1, second_bdev_io);
1938 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1939 
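	/* Completing the first reset should also execute the queued second reset. */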
1940 	poll_threads();
1941 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1942 	poll_threads();
1943 
1944 	CU_ASSERT(nvme_ctrlr->resetting == false);
1945 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1946 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1947 
	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while the first request is being processed.
	 *
	 * The difference from the above scenario is that the ctrlr reset fails while the
	 * first request is being processed. Hence both reset requests should fail.
	 */
1954 	set_thread(1);
1955 
1956 	bdev_nvme_submit_request(ch2, first_bdev_io);
1957 	CU_ASSERT(nvme_ctrlr->resetting == true);
1958 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1959 
1960 	set_thread(0);
1961 
1962 	bdev_nvme_submit_request(ch1, second_bdev_io);
1963 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1964 
1965 	ctrlr->fail_reset = true;
1966 
1967 	poll_threads();
1968 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1969 	poll_threads();
1970 
1971 	CU_ASSERT(nvme_ctrlr->resetting == false);
1972 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1973 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1974 
1975 	spdk_put_io_channel(ch1);
1976 
1977 	set_thread(1);
1978 
1979 	spdk_put_io_channel(ch2);
1980 
1981 	poll_threads();
1982 
1983 	set_thread(0);
1984 
1985 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
1986 	CU_ASSERT(rc == 0);
1987 
1988 	poll_threads();
1989 	spdk_delay_us(1000);
1990 	poll_threads();
1991 
1992 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1993 
1994 	free(first_bdev_io);
1995 	free(second_bdev_io);
1996 }
1997 
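/* Verify the attach cases: a failed ctrlr creates no nvme_ctrlr, a ctrlr without
 * namespaces creates only an nvme_ctrlr, a ctrlr with one namespace creates an
 * nvme_ctrlr and an nvme_bdev, and a bdev registration failure leaves an
 * nvme_ctrlr without namespaces.
 */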
1998 static void
1999 test_attach_ctrlr(void)
2000 {
2001 	struct spdk_nvme_transport_id trid = {};
2002 	struct spdk_nvme_ctrlr *ctrlr;
2003 	struct nvme_ctrlr *nvme_ctrlr;
2004 	const int STRING_SIZE = 32;
2005 	const char *attached_names[STRING_SIZE];
2006 	struct nvme_bdev *nbdev;
2007 	int rc;
2008 
2009 	set_thread(0);
2010 
2011 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2012 	ut_init_trid(&trid);
2013 
	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is removed
	 * by probe polling.
	 */
2017 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2018 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2019 
2020 	ctrlr->is_failed = true;
2021 	g_ut_attach_ctrlr_status = -EIO;
2022 	g_ut_attach_bdev_count = 0;
2023 
2024 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2025 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2026 	CU_ASSERT(rc == 0);
2027 
2028 	spdk_delay_us(1000);
2029 	poll_threads();
2030 
2031 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2032 
	/* If the ctrlr has no namespaces, one nvme_ctrlr with no namespaces is created */
2034 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2035 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2036 
2037 	g_ut_attach_ctrlr_status = 0;
2038 
2039 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2040 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2041 	CU_ASSERT(rc == 0);
2042 
2043 	spdk_delay_us(1000);
2044 	poll_threads();
2045 
2046 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2047 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2048 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2049 
2050 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2051 	CU_ASSERT(rc == 0);
2052 
2053 	poll_threads();
2054 	spdk_delay_us(1000);
2055 	poll_threads();
2056 
2057 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2058 
	/* If the ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev are created.
	 */
2062 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2063 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2064 
2065 	g_ut_attach_bdev_count = 1;
2066 
2067 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2068 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2069 	CU_ASSERT(rc == 0);
2070 
2071 	spdk_delay_us(1000);
2072 	poll_threads();
2073 
2074 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2075 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2076 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2077 
2078 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2079 	attached_names[0] = NULL;
2080 
2081 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2082 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2083 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2084 
2085 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2086 	CU_ASSERT(rc == 0);
2087 
2088 	poll_threads();
2089 	spdk_delay_us(1000);
2090 	poll_threads();
2091 
2092 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2093 
	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespaces is
	 * created because creating the nvme_bdev failed.
	 */
2097 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2098 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2099 
2100 	g_ut_register_bdev_status = -EINVAL;
2101 	g_ut_attach_bdev_count = 0;
2102 
2103 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2104 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2105 	CU_ASSERT(rc == 0);
2106 
2107 	spdk_delay_us(1000);
2108 	poll_threads();
2109 
2110 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2111 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2112 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2113 
2114 	CU_ASSERT(attached_names[0] == NULL);
2115 
2116 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2117 	CU_ASSERT(rc == 0);
2118 
2119 	poll_threads();
2120 	spdk_delay_us(1000);
2121 	poll_threads();
2122 
2123 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2124 
2125 	g_ut_register_bdev_status = 0;
2126 }
2127 
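/* Verify that an AER from the ctrlr triggers namespace re-population when namespace
 * attributes change and updates ANA states when the ANA log page changes.
 */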
2128 static void
2129 test_aer_cb(void)
2130 {
2131 	struct spdk_nvme_transport_id trid = {};
2132 	struct spdk_nvme_ctrlr *ctrlr;
2133 	struct nvme_ctrlr *nvme_ctrlr;
2134 	struct nvme_bdev *bdev;
2135 	const int STRING_SIZE = 32;
2136 	const char *attached_names[STRING_SIZE];
2137 	union spdk_nvme_async_event_completion event = {};
2138 	struct spdk_nvme_cpl cpl = {};
2139 	int rc;
2140 
2141 	set_thread(0);
2142 
2143 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2144 	ut_init_trid(&trid);
2145 
	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd, and 4th
	 * namespaces are populated.
	 */
2149 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2150 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2151 
2152 	ctrlr->ns[0].is_active = false;
2153 
2154 	g_ut_attach_ctrlr_status = 0;
2155 	g_ut_attach_bdev_count = 3;
2156 
2157 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2158 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2159 	CU_ASSERT(rc == 0);
2160 
2161 	spdk_delay_us(1000);
2162 	poll_threads();
2163 
2164 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2165 	poll_threads();
2166 
2167 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2168 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2169 
2170 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2171 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2172 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2173 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2174 
2175 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2176 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2177 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2178 
	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace, and
	 * change the size of the 4th namespace.
	 */
2182 	ctrlr->ns[0].is_active = true;
2183 	ctrlr->ns[2].is_active = false;
2184 	ctrlr->nsdata[3].nsze = 2048;
2185 
2186 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2187 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2188 	cpl.cdw0 = event.raw;
2189 
2190 	aer_cb(nvme_ctrlr, &cpl);
2191 
2192 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2193 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2194 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2195 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2196 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2197 
2198 	/* Change ANA state of active namespaces. */
2199 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2200 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2201 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2202 
2203 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2204 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2205 	cpl.cdw0 = event.raw;
2206 
2207 	aer_cb(nvme_ctrlr, &cpl);
2208 
2209 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2210 	poll_threads();
2211 
2212 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2213 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2214 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2215 
2216 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2217 	CU_ASSERT(rc == 0);
2218 
2219 	poll_threads();
2220 	spdk_delay_us(1000);
2221 	poll_threads();
2222 
2223 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2224 }
2225 
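/* Submit a single I/O of the given type and verify that it is issued to the I/O
 * qpair and completes successfully after polling.
 */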
2226 static void
2227 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2228 			enum spdk_bdev_io_type io_type)
2229 {
2230 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2231 	struct nvme_io_path *io_path;
2232 	struct spdk_nvme_qpair *qpair;
2233 
2234 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2235 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2236 	qpair = io_path->qpair->qpair;
2237 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2238 
2239 	bdev_io->type = io_type;
2240 	bdev_io->internal.in_submit_request = true;
2241 
2242 	bdev_nvme_submit_request(ch, bdev_io);
2243 
2244 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2245 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2246 
2247 	poll_threads();
2248 
2249 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2250 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2251 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2252 }
2253 
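/* Submit an I/O of a type that is completed inline, and verify that it succeeds
 * without leaving any request outstanding on the I/O qpair.
 */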
2254 static void
2255 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2256 		   enum spdk_bdev_io_type io_type)
2257 {
2258 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2259 	struct nvme_io_path *io_path;
2260 	struct spdk_nvme_qpair *qpair;
2261 
2262 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2263 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2264 	qpair = io_path->qpair->qpair;
2265 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2266 
2267 	bdev_io->type = io_type;
2268 	bdev_io->internal.in_submit_request = true;
2269 
2270 	bdev_nvme_submit_request(ch, bdev_io);
2271 
2272 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2273 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2274 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2275 }
2276 
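/* Submit a compare-and-write I/O and verify that it is issued as two fused NVMe
 * commands, compare first, and completes as a single bdev I/O.
 */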
2277 static void
2278 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2279 {
2280 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2281 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2282 	struct ut_nvme_req *req;
2283 	struct nvme_io_path *io_path;
2284 	struct spdk_nvme_qpair *qpair;
2285 
2286 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2287 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2288 	qpair = io_path->qpair->qpair;
2289 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2290 
	/* Only compare-and-write is supported for now. */
2292 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2293 	bdev_io->internal.in_submit_request = true;
2294 
2295 	bdev_nvme_submit_request(ch, bdev_io);
2296 
2297 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2298 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2299 	CU_ASSERT(bio->first_fused_submitted == true);
2300 
2301 	/* First outstanding request is compare operation. */
2302 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2303 	SPDK_CU_ASSERT_FATAL(req != NULL);
2304 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2305 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2306 
2307 	poll_threads();
2308 
2309 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2310 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2311 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2312 }
2313 
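/* Submit an admin passthrough command and verify that it is processed by the admin
 * qpair poller on the thread that created the ctrlr and then completed back on the
 * submitting thread.
 */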
2314 static void
2315 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2316 			 struct spdk_nvme_ctrlr *ctrlr)
2317 {
2318 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2319 	bdev_io->internal.in_submit_request = true;
2320 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2321 
2322 	bdev_nvme_submit_request(ch, bdev_io);
2323 
2324 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2325 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2326 
2327 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2328 	poll_thread_times(1, 1);
2329 
2330 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2331 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2332 
2333 	poll_thread_times(0, 1);
2334 
2335 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2336 }
2337 
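/* Run the submit helpers above for read, write, compare, unmap, flush, fused
 * compare-and-write, and admin commands, including the extended API path that is
 * used when data is described by a memory domain.
 */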
2338 static void
2339 test_submit_nvme_cmd(void)
2340 {
2341 	struct spdk_nvme_transport_id trid = {};
2342 	struct spdk_nvme_ctrlr *ctrlr;
2343 	struct nvme_ctrlr *nvme_ctrlr;
2344 	const int STRING_SIZE = 32;
2345 	const char *attached_names[STRING_SIZE];
2346 	struct nvme_bdev *bdev;
2347 	struct spdk_bdev_io *bdev_io;
2348 	struct spdk_io_channel *ch;
2349 	int rc;
2350 
2351 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2352 	ut_init_trid(&trid);
2353 
2354 	set_thread(1);
2355 
2356 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2357 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2358 
2359 	g_ut_attach_ctrlr_status = 0;
2360 	g_ut_attach_bdev_count = 1;
2361 
2362 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2363 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2364 	CU_ASSERT(rc == 0);
2365 
2366 	spdk_delay_us(1000);
2367 	poll_threads();
2368 
2369 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2370 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2371 
2372 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2373 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2374 
2375 	set_thread(0);
2376 
2377 	ch = spdk_get_io_channel(bdev);
2378 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2379 
2380 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2381 
2382 	bdev_io->u.bdev.iovs = NULL;
2383 
2384 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2385 
2386 	ut_bdev_io_set_buf(bdev_io);
2387 
2388 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2389 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2390 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2391 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2392 
2393 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2394 
2395 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2396 
	/* Verify that the ext NVMe API is called when data is described by a memory domain. */
2398 	g_ut_readv_ext_called = false;
2399 	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
2400 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2401 	CU_ASSERT(g_ut_readv_ext_called == true);
2402 	g_ut_readv_ext_called = false;
2403 	bdev_io->u.bdev.memory_domain = NULL;
2404 
2405 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2406 
2407 	free(bdev_io);
2408 
2409 	spdk_put_io_channel(ch);
2410 
2411 	poll_threads();
2412 
2413 	set_thread(1);
2414 
2415 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2416 	CU_ASSERT(rc == 0);
2417 
2418 	poll_threads();
2419 	spdk_delay_us(1000);
2420 	poll_threads();
2421 
2422 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2423 }
2424 
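/* Verify adding and removing alternative trids of an nvme_ctrlr: a new trid is
 * inserted before any failed trid, removing the active trid triggers failover to
 * the next trid, and removing the last trid deletes the nvme_ctrlr itself.
 */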
2425 static void
2426 test_add_remove_trid(void)
2427 {
2428 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2429 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2430 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2431 	const int STRING_SIZE = 32;
2432 	const char *attached_names[STRING_SIZE];
2433 	struct nvme_path_id *ctrid;
2434 	int rc;
2435 
2436 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2437 	ut_init_trid(&path1.trid);
2438 	ut_init_trid2(&path2.trid);
2439 	ut_init_trid3(&path3.trid);
2440 
2441 	set_thread(0);
2442 
2443 	g_ut_attach_ctrlr_status = 0;
2444 	g_ut_attach_bdev_count = 0;
2445 
2446 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2447 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2448 
2449 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2450 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2451 	CU_ASSERT(rc == 0);
2452 
2453 	spdk_delay_us(1000);
2454 	poll_threads();
2455 
2456 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2457 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2458 
2459 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2460 
2461 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2462 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2463 
2464 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2465 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2466 	CU_ASSERT(rc == 0);
2467 
2468 	spdk_delay_us(1000);
2469 	poll_threads();
2470 
2471 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2472 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2473 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2474 			break;
2475 		}
2476 	}
2477 	CU_ASSERT(ctrid != NULL);
2478 
2479 	/* trid3 is not in the registered list. */
2480 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2481 	CU_ASSERT(rc == -ENXIO);
2482 
	/* trid2 is not in use, so it is simply removed. */
2484 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2485 	CU_ASSERT(rc == 0);
2486 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2487 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2488 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2489 	}
2490 
2491 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2492 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2493 
2494 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2495 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2496 	CU_ASSERT(rc == 0);
2497 
2498 	spdk_delay_us(1000);
2499 	poll_threads();
2500 
2501 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2502 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2503 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2504 			break;
2505 		}
2506 	}
2507 	CU_ASSERT(ctrid != NULL);
2508 
	/* Mark path3 as failed by forcefully setting its last_failed_tsc to a non-zero value.
	 * If we add path2 again, it should be inserted between path1 and path3.
	 * Then we remove path2. It is not in use, so it is simply removed.
	 */
2513 	ctrid->last_failed_tsc = spdk_get_ticks() + 1;
2514 
2515 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2516 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2517 
2518 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2519 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2520 	CU_ASSERT(rc == 0);
2521 
2522 	spdk_delay_us(1000);
2523 	poll_threads();
2524 
2525 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2526 
2527 	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
2528 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2529 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);
2530 
2531 	ctrid = TAILQ_NEXT(ctrid, link);
2532 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2533 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);
2534 
2535 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2536 	CU_ASSERT(rc == 0);
2537 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2538 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2539 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2540 	}
2541 
	/* path1 is currently in use and path3 is an alternative path.
	 * If we remove path1, the active path changes to path3.
	 */
2545 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
2546 	CU_ASSERT(rc == 0);
2547 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2548 	CU_ASSERT(nvme_ctrlr->resetting == true);
2549 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2550 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2551 	}
2552 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2553 
2554 	poll_threads();
2555 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2556 	poll_threads();
2557 
2558 	CU_ASSERT(nvme_ctrlr->resetting == false);
2559 
2560 	/* path3 is the current and only path. If we remove path3, the corresponding
2561 	 * nvme_ctrlr is removed.
2562 	 */
2563 	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2564 	CU_ASSERT(rc == 0);
2565 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2566 
2567 	poll_threads();
2568 	spdk_delay_us(1000);
2569 	poll_threads();
2570 
2571 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2572 
2573 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2574 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2575 
2576 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2577 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2578 	CU_ASSERT(rc == 0);
2579 
2580 	spdk_delay_us(1000);
2581 	poll_threads();
2582 
2583 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2584 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2585 
2586 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2587 
2588 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2589 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2590 
2591 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2592 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2593 	CU_ASSERT(rc == 0);
2594 
2595 	spdk_delay_us(1000);
2596 	poll_threads();
2597 
2598 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2599 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2600 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2601 			break;
2602 		}
2603 	}
2604 	CU_ASSERT(ctrid != NULL);
2605 
	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2607 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2608 	CU_ASSERT(rc == 0);
2609 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2610 
2611 	poll_threads();
2612 	spdk_delay_us(1000);
2613 	poll_threads();
2614 
2615 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2616 }
2617 
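/* Verify aborting bdev I/Os: aborting an already completed request fails, while
 * aborting an outstanding I/O, fused I/O, admin command, or queued request succeeds.
 */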
2618 static void
2619 test_abort(void)
2620 {
2621 	struct spdk_nvme_transport_id trid = {};
2622 	struct nvme_ctrlr_opts opts = {};
2623 	struct spdk_nvme_ctrlr *ctrlr;
2624 	struct nvme_ctrlr *nvme_ctrlr;
2625 	const int STRING_SIZE = 32;
2626 	const char *attached_names[STRING_SIZE];
2627 	struct nvme_bdev *bdev;
2628 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2629 	struct spdk_io_channel *ch1, *ch2;
2630 	struct nvme_bdev_channel *nbdev_ch1;
2631 	struct nvme_io_path *io_path1;
2632 	struct nvme_qpair *nvme_qpair1;
2633 	int rc;
2634 
	/* Create the ctrlr on thread 1, and submit the I/O and admin requests to be
	 * aborted on thread 0. Abort requests for I/O are submitted on thread 0, and
	 * abort requests for admin commands are submitted on thread 1. Both should succeed.
	 */
2639 
2640 	ut_init_trid(&trid);
2641 
2642 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2643 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2644 
2645 	g_ut_attach_ctrlr_status = 0;
2646 	g_ut_attach_bdev_count = 1;
2647 
2648 	set_thread(1);
2649 
2650 	opts.ctrlr_loss_timeout_sec = -1;
2651 	opts.reconnect_delay_sec = 1;
2652 
2653 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2654 			      attach_ctrlr_done, NULL, NULL, &opts, false);
2655 	CU_ASSERT(rc == 0);
2656 
2657 	spdk_delay_us(1000);
2658 	poll_threads();
2659 
2660 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2661 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2662 
2663 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2664 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2665 
2666 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2667 	ut_bdev_io_set_buf(write_io);
2668 
2669 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2670 	ut_bdev_io_set_buf(fuse_io);
2671 
2672 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2673 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2674 
2675 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2676 
2677 	set_thread(0);
2678 
2679 	ch1 = spdk_get_io_channel(bdev);
2680 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2681 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2682 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2683 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2684 	nvme_qpair1 = io_path1->qpair;
2685 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2686 
2687 	set_thread(1);
2688 
2689 	ch2 = spdk_get_io_channel(bdev);
2690 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2691 
2692 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2693 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2694 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2695 
2696 	/* Aborting the already completed request should fail. */
2697 	write_io->internal.in_submit_request = true;
2698 	bdev_nvme_submit_request(ch1, write_io);
2699 	poll_threads();
2700 
2701 	CU_ASSERT(write_io->internal.in_submit_request == false);
2702 
2703 	abort_io->u.abort.bio_to_abort = write_io;
2704 	abort_io->internal.in_submit_request = true;
2705 
2706 	bdev_nvme_submit_request(ch1, abort_io);
2707 
2708 	poll_threads();
2709 
2710 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2711 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2712 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2713 
2714 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2715 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2716 
2717 	admin_io->internal.in_submit_request = true;
2718 	bdev_nvme_submit_request(ch1, admin_io);
2719 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2720 	poll_threads();
2721 
2722 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2723 
2724 	abort_io->u.abort.bio_to_abort = admin_io;
2725 	abort_io->internal.in_submit_request = true;
2726 
2727 	bdev_nvme_submit_request(ch2, abort_io);
2728 
2729 	poll_threads();
2730 
2731 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2732 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2733 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2734 
2735 	/* Aborting the write request should succeed. */
2736 	write_io->internal.in_submit_request = true;
2737 	bdev_nvme_submit_request(ch1, write_io);
2738 
2739 	CU_ASSERT(write_io->internal.in_submit_request == true);
2740 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2741 
2742 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2743 	abort_io->u.abort.bio_to_abort = write_io;
2744 	abort_io->internal.in_submit_request = true;
2745 
2746 	bdev_nvme_submit_request(ch1, abort_io);
2747 
2748 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2749 	poll_threads();
2750 
2751 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2752 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2753 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2754 	CU_ASSERT(write_io->internal.in_submit_request == false);
2755 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2756 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2757 
	/* Aborting the fused request should succeed. */
2759 	fuse_io->internal.in_submit_request = true;
2760 	bdev_nvme_submit_request(ch1, fuse_io);
2761 
2762 	CU_ASSERT(fuse_io->internal.in_submit_request == true);
2763 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2764 
2765 	abort_io->u.abort.bio_to_abort = fuse_io;
2766 	abort_io->internal.in_submit_request = true;
2767 
2768 	bdev_nvme_submit_request(ch1, abort_io);
2769 
2770 	spdk_delay_us(10000);
2771 	poll_threads();
2772 
2773 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2774 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2775 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2776 	CU_ASSERT(fuse_io->internal.in_submit_request == false);
2777 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2778 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2779 
2780 	/* Aborting the admin request should succeed. */
2781 	admin_io->internal.in_submit_request = true;
2782 	bdev_nvme_submit_request(ch1, admin_io);
2783 
2784 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2785 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2786 
2787 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2788 	abort_io->u.abort.bio_to_abort = admin_io;
2789 	abort_io->internal.in_submit_request = true;
2790 
2791 	bdev_nvme_submit_request(ch2, abort_io);
2792 
2793 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2794 	poll_threads();
2795 
2796 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2797 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2798 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2799 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2800 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2801 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2802 
2803 	set_thread(0);
2804 
	/* If a qpair is disconnected, it is freed and then reconnected by resetting
	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is resetting
	 * should be queued.
	 */
2809 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2810 
2811 	poll_thread_times(0, 3);
2812 
2813 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2814 	CU_ASSERT(nvme_ctrlr->resetting == true);
2815 
2816 	write_io->internal.in_submit_request = true;
2817 
2818 	bdev_nvme_submit_request(ch1, write_io);
2819 
2820 	CU_ASSERT(write_io->internal.in_submit_request == true);
2821 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2822 
2823 	/* Aborting the queued write request should succeed immediately. */
2824 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2825 	abort_io->u.abort.bio_to_abort = write_io;
2826 	abort_io->internal.in_submit_request = true;
2827 
2828 	bdev_nvme_submit_request(ch1, abort_io);
2829 
2830 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2831 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2832 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2833 	CU_ASSERT(write_io->internal.in_submit_request == false);
2834 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2835 
2836 	poll_threads();
2837 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2838 	poll_threads();
2839 
2840 	spdk_put_io_channel(ch1);
2841 
2842 	set_thread(1);
2843 
2844 	spdk_put_io_channel(ch2);
2845 
2846 	poll_threads();
2847 
2848 	free(write_io);
2849 	free(fuse_io);
2850 	free(admin_io);
2851 	free(abort_io);
2852 
2853 	set_thread(1);
2854 
2855 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2856 	CU_ASSERT(rc == 0);
2857 
2858 	poll_threads();
2859 	spdk_delay_us(1000);
2860 	poll_threads();
2861 
2862 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2863 }
2864 
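/* Verify that bdev_nvme_get_io_qpair() returns the I/O qpair that belongs to the
 * given ctrlr I/O channel.
 */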
2865 static void
2866 test_get_io_qpair(void)
2867 {
2868 	struct spdk_nvme_transport_id trid = {};
2869 	struct spdk_nvme_ctrlr ctrlr = {};
2870 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2871 	struct spdk_io_channel *ch;
2872 	struct nvme_ctrlr_channel *ctrlr_ch;
2873 	struct spdk_nvme_qpair *qpair;
2874 	int rc;
2875 
2876 	ut_init_trid(&trid);
2877 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2878 
2879 	set_thread(0);
2880 
2881 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2882 	CU_ASSERT(rc == 0);
2883 
2884 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2885 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2886 
2887 	ch = spdk_get_io_channel(nvme_ctrlr);
2888 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2889 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2890 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2891 
2892 	qpair = bdev_nvme_get_io_qpair(ch);
2893 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2894 
2895 	spdk_put_io_channel(ch);
2896 
2897 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2898 	CU_ASSERT(rc == 0);
2899 
2900 	poll_threads();
2901 	spdk_delay_us(1000);
2902 	poll_threads();
2903 
2904 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2905 }
2906 
/* Test a scenario in which the bdev subsystem starts shutting down while NVMe bdevs
 * still exist. In this scenario, spdk_bdev_unregister() is called first. This test
 * case guards against regressions in that scenario. spdk_bdev_unregister() calls
 * bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly.
 */
2912 static void
2913 test_bdev_unregister(void)
2914 {
2915 	struct spdk_nvme_transport_id trid = {};
2916 	struct spdk_nvme_ctrlr *ctrlr;
2917 	struct nvme_ctrlr *nvme_ctrlr;
2918 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2919 	const int STRING_SIZE = 32;
2920 	const char *attached_names[STRING_SIZE];
2921 	struct nvme_bdev *bdev1, *bdev2;
2922 	int rc;
2923 
2924 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2925 	ut_init_trid(&trid);
2926 
2927 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2928 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2929 
2930 	g_ut_attach_ctrlr_status = 0;
2931 	g_ut_attach_bdev_count = 2;
2932 
2933 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2934 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2935 	CU_ASSERT(rc == 0);
2936 
2937 	spdk_delay_us(1000);
2938 	poll_threads();
2939 
2940 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2941 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2942 
2943 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2944 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2945 
2946 	bdev1 = nvme_ns1->bdev;
2947 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2948 
2949 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2950 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2951 
2952 	bdev2 = nvme_ns2->bdev;
2953 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2954 
2955 	bdev_nvme_destruct(&bdev1->disk);
2956 	bdev_nvme_destruct(&bdev2->disk);
2957 
2958 	poll_threads();
2959 
2960 	CU_ASSERT(nvme_ns1->bdev == NULL);
2961 	CU_ASSERT(nvme_ns2->bdev == NULL);
2962 
2963 	nvme_ctrlr->destruct = true;
2964 	_nvme_ctrlr_destruct(nvme_ctrlr);
2965 
2966 	poll_threads();
2967 	spdk_delay_us(1000);
2968 	poll_threads();
2969 
2970 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2971 }
2972 
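/* Verify namespace identity comparison. Two namespaces match only if their EUI64,
 * NGUID, UUID, and CSI all match whenever they are defined.
 */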
2973 static void
2974 test_compare_ns(void)
2975 {
2976 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2977 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2978 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2979 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
2980 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
2981 
2982 	/* No IDs are defined. */
2983 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2984 
	/* Only EUI64s are defined and they do not match. */
2986 	nsdata1.eui64 = 0xABCDEF0123456789;
2987 	nsdata2.eui64 = 0xBBCDEF0123456789;
2988 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2989 
	/* Only EUI64s are defined and they match. */
2991 	nsdata2.eui64 = 0xABCDEF0123456789;
2992 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2993 
	/* Only NGUIDs are defined and they do not match. */
2995 	nsdata1.eui64 = 0x0;
2996 	nsdata2.eui64 = 0x0;
2997 	nsdata1.nguid[0] = 0x12;
2998 	nsdata2.nguid[0] = 0x10;
2999 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3000 
	/* Only NGUIDs are defined and they match. */
3002 	nsdata2.nguid[0] = 0x12;
3003 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3004 
	/* Only UUIDs are defined and they do not match. */
3006 	nsdata1.nguid[0] = 0x0;
3007 	nsdata2.nguid[0] = 0x0;
3008 	ns1.uuid = &uuid1;
3009 	ns2.uuid = &uuid2;
3010 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3011 
3012 	/* Only one UUID is defined. */
3013 	ns1.uuid = NULL;
3014 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3015 
	/* Only UUIDs are defined and they match. */
3017 	ns1.uuid = &uuid2;
3018 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3019 
	/* EUI64, NGUID, and UUID are all defined and match. */
3021 	nsdata1.eui64 = 0x123456789ABCDEF;
3022 	nsdata2.eui64 = 0x123456789ABCDEF;
3023 	nsdata1.nguid[15] = 0x34;
3024 	nsdata2.nguid[15] = 0x34;
3025 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3026 
	/* The CSIs do not match. */
3028 	ns1.csi = SPDK_NVME_CSI_ZNS;
3029 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3030 }
3031 
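/* Verify that the ANA states read from the ANA log page at attach time are applied
 * to the corresponding namespaces.
 */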
3032 static void
3033 test_init_ana_log_page(void)
3034 {
3035 	struct spdk_nvme_transport_id trid = {};
3036 	struct spdk_nvme_ctrlr *ctrlr;
3037 	struct nvme_ctrlr *nvme_ctrlr;
3038 	const int STRING_SIZE = 32;
3039 	const char *attached_names[STRING_SIZE];
3040 	int rc;
3041 
3042 	set_thread(0);
3043 
3044 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3045 	ut_init_trid(&trid);
3046 
3047 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
3048 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3049 
3050 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3051 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3052 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3053 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3054 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3055 
3056 	g_ut_attach_ctrlr_status = 0;
3057 	g_ut_attach_bdev_count = 5;
3058 
3059 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3060 			      attach_ctrlr_done, NULL, NULL, NULL, false);
3061 	CU_ASSERT(rc == 0);
3062 
3063 	spdk_delay_us(1000);
3064 	poll_threads();
3065 
3066 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3067 	poll_threads();
3068 
3069 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3070 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3071 
3072 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
3073 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
3074 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
3075 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
3076 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
3077 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3078 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3079 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3080 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
3081 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
3082 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
3083 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
3084 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
3085 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
3086 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
3087 
3088 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3089 	CU_ASSERT(rc == 0);
3090 
3091 	poll_threads();
3092 	spdk_delay_us(1000);
3093 	poll_threads();
3094 
3095 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3096 }
3097 
3098 static void
3099 init_accel(void)
3100 {
3101 	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3102 				sizeof(int), "accel_p");
3103 }
3104 
3105 static void
3106 fini_accel(void)
3107 {
3108 	spdk_io_device_unregister(g_accel_p, NULL);
3109 }
3110 
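/* Verify that bdev_nvme_get_memory_domains() aggregates the memory domains of all
 * ctrlrs of a multipath bdev and returns the total count even if the given array
 * is too small to hold them all.
 */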
3111 static void
3112 test_get_memory_domains(void)
3113 {
3114 	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3115 	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3116 	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3117 	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3118 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3119 	struct spdk_memory_domain *domains[4] = {};
3120 	int rc = 0;
3121 
3122 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3123 
3124 	/* nvme controller doesn't have memory domains */
3125 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3126 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3127 	CU_ASSERT(rc == 0);
3128 	CU_ASSERT(domains[0] == NULL);
3129 	CU_ASSERT(domains[1] == NULL);
3130 
3131 	/* nvme controller has a memory domain */
3132 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3133 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3134 	CU_ASSERT(rc == 1);
3135 	CU_ASSERT(domains[0] != NULL);
3136 	memset(domains, 0, sizeof(domains));
3137 
3138 	/* multipath, 2 controllers report 1 memory domain each */
3139 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3140 
3141 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3142 	CU_ASSERT(rc == 2);
3143 	CU_ASSERT(domains[0] != NULL);
3144 	CU_ASSERT(domains[1] != NULL);
3145 	memset(domains, 0, sizeof(domains));
3146 
3147 	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3148 	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3149 	CU_ASSERT(rc == 2);
3150 
3151 	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3152 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3153 	CU_ASSERT(rc == 2);
3154 	CU_ASSERT(domains[0] == NULL);
3155 	CU_ASSERT(domains[1] == NULL);
3156 
3157 	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3158 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3159 	CU_ASSERT(rc == 2);
3160 	CU_ASSERT(domains[0] != NULL);
3161 	CU_ASSERT(domains[1] == NULL);
3162 	memset(domains, 0, sizeof(domains));
3163 
	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3165 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3166 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3167 	CU_ASSERT(rc == 4);
3168 	CU_ASSERT(domains[0] != NULL);
3169 	CU_ASSERT(domains[1] != NULL);
3170 	CU_ASSERT(domains[2] != NULL);
3171 	CU_ASSERT(domains[3] != NULL);
3172 	memset(domains, 0, sizeof(domains));
3173 
	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test).
	 * The array size is less than the number of memory domains. */
3176 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3177 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3178 	CU_ASSERT(rc == 4);
3179 	CU_ASSERT(domains[0] != NULL);
3180 	CU_ASSERT(domains[1] != NULL);
3181 	CU_ASSERT(domains[2] != NULL);
3182 	CU_ASSERT(domains[3] == NULL);
3183 	memset(domains, 0, sizeof(domains));
3184 
3185 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3186 }
3187 
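/* Verify that a disconnected I/O qpair is freed and then recreated by resetting the
 * corresponding nvme_ctrlr, and that it stays freed if the reset fails.
 */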
3188 static void
3189 test_reconnect_qpair(void)
3190 {
3191 	struct spdk_nvme_transport_id trid = {};
3192 	struct spdk_nvme_ctrlr *ctrlr;
3193 	struct nvme_ctrlr *nvme_ctrlr;
3194 	const int STRING_SIZE = 32;
3195 	const char *attached_names[STRING_SIZE];
3196 	struct nvme_bdev *bdev;
3197 	struct spdk_io_channel *ch1, *ch2;
3198 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3199 	struct nvme_io_path *io_path1, *io_path2;
3200 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3201 	int rc;
3202 
3203 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3204 	ut_init_trid(&trid);
3205 
3206 	set_thread(0);
3207 
3208 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3209 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3210 
3211 	g_ut_attach_ctrlr_status = 0;
3212 	g_ut_attach_bdev_count = 1;
3213 
3214 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3215 			      attach_ctrlr_done, NULL, NULL, NULL, false);
3216 	CU_ASSERT(rc == 0);
3217 
3218 	spdk_delay_us(1000);
3219 	poll_threads();
3220 
3221 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3222 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3223 
3224 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3225 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3226 
3227 	ch1 = spdk_get_io_channel(bdev);
3228 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3229 
3230 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3231 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3232 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3233 	nvme_qpair1 = io_path1->qpair;
3234 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3235 
3236 	set_thread(1);
3237 
3238 	ch2 = spdk_get_io_channel(bdev);
3239 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3240 
3241 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3242 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3243 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3244 	nvme_qpair2 = io_path2->qpair;
3245 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3246 
3247 	/* If a qpair is disconnected, it is freed and then reconnected via
3248 	 * resetting the corresponding nvme_ctrlr.
3249 	 */
3250 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3251 	ctrlr->is_failed = true;
3252 
3253 	poll_thread_times(1, 3);
3254 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3255 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3256 	CU_ASSERT(nvme_ctrlr->resetting == true);
3257 
3258 	poll_thread_times(0, 3);
3259 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3260 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3261 	CU_ASSERT(ctrlr->is_failed == true);
3262 
3263 	poll_thread_times(1, 2);
3264 	poll_thread_times(0, 1);
3265 	CU_ASSERT(ctrlr->is_failed == false);
3266 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3267 
3268 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3269 	poll_thread_times(0, 2);
3270 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3271 
3272 	poll_thread_times(0, 1);
3273 	poll_thread_times(1, 1);
3274 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3275 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3276 	CU_ASSERT(nvme_ctrlr->resetting == true);
3277 
3278 	poll_thread_times(0, 2);
3279 	poll_thread_times(1, 1);
3280 	poll_thread_times(0, 1);
3281 	CU_ASSERT(nvme_ctrlr->resetting == false);
3282 
3283 	poll_threads();
3284 
3285 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3286 	 * fails, the qpair is just freed.
3287 	 */
3288 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3289 	ctrlr->is_failed = true;
3290 	ctrlr->fail_reset = true;
3291 
3292 	poll_thread_times(1, 3);
3293 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3294 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3295 	CU_ASSERT(nvme_ctrlr->resetting == true);
3296 
3297 	poll_thread_times(0, 3);
3298 	poll_thread_times(1, 1);
3299 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3300 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3301 	CU_ASSERT(ctrlr->is_failed == true);
3302 
3303 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3304 	poll_thread_times(0, 3);
3305 	poll_thread_times(1, 1);
3306 	poll_thread_times(0, 1);
3307 	CU_ASSERT(ctrlr->is_failed == true);
3308 	CU_ASSERT(nvme_ctrlr->resetting == false);
3309 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3310 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3311 
3312 	poll_threads();
3313 
3314 	spdk_put_io_channel(ch2);
3315 
3316 	set_thread(0);
3317 
3318 	spdk_put_io_channel(ch1);
3319 
3320 	poll_threads();
3321 
3322 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3323 	CU_ASSERT(rc == 0);
3324 
3325 	poll_threads();
3326 	spdk_delay_us(1000);
3327 	poll_threads();
3328 
3329 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3330 }
3331 
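/* Verify creation of an nvme_bdev_ctrlr with multiple paths: attaching a second
 * ctrlr with a duplicated cntlid fails, and ctrlrs can be deleted either all at
 * once or one by one.
 */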
3332 static void
3333 test_create_bdev_ctrlr(void)
3334 {
3335 	struct nvme_path_id path1 = {}, path2 = {};
3336 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3337 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3338 	const int STRING_SIZE = 32;
3339 	const char *attached_names[STRING_SIZE];
3340 	int rc;
3341 
3342 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3343 	ut_init_trid(&path1.trid);
3344 	ut_init_trid2(&path2.trid);
3345 
3346 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3347 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3348 
3349 	g_ut_attach_ctrlr_status = 0;
3350 	g_ut_attach_bdev_count = 0;
3351 
3352 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3353 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3354 	CU_ASSERT(rc == 0);
3355 
3356 	spdk_delay_us(1000);
3357 	poll_threads();
3358 
3359 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3360 	poll_threads();
3361 
3362 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3363 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3364 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3365 
3366 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3367 	g_ut_attach_ctrlr_status = -EINVAL;
3368 
3369 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3370 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3371 
3372 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3373 
3374 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3375 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3376 	CU_ASSERT(rc == 0);
3377 
3378 	spdk_delay_us(1000);
3379 	poll_threads();
3380 
3381 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3382 	poll_threads();
3383 
3384 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3385 
3386 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3387 	g_ut_attach_ctrlr_status = 0;
3388 
3389 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3390 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3391 
3392 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3393 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3394 	CU_ASSERT(rc == 0);
3395 
3396 	spdk_delay_us(1000);
3397 	poll_threads();
3398 
3399 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3400 	poll_threads();
3401 
3402 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3403 
3404 	/* Delete two ctrlrs at once. */
3405 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3406 	CU_ASSERT(rc == 0);
3407 
3408 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3409 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3410 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3411 
3412 	poll_threads();
3413 	spdk_delay_us(1000);
3414 	poll_threads();
3415 
3416 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3417 
	/* Add two ctrlrs and delete them one by one. */
3419 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3420 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3421 
3422 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3423 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3424 
3425 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3426 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3427 	CU_ASSERT(rc == 0);
3428 
3429 	spdk_delay_us(1000);
3430 	poll_threads();
3431 
3432 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3433 	poll_threads();
3434 
3435 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3436 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3437 	CU_ASSERT(rc == 0);
3438 
3439 	spdk_delay_us(1000);
3440 	poll_threads();
3441 
3442 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3443 	poll_threads();
3444 
3445 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3446 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3447 
3448 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3449 	CU_ASSERT(rc == 0);
3450 
3451 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3452 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3453 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3454 
3455 	poll_threads();
3456 	spdk_delay_us(1000);
3457 	poll_threads();
3458 
3459 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3460 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3461 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3462 
3463 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3464 	CU_ASSERT(rc == 0);
3465 
3466 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3467 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3468 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3469 
3470 	poll_threads();
3471 	spdk_delay_us(1000);
3472 	poll_threads();
3473 
3474 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3475 }
3476 
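/* Find the nvme_ns of the given nvme_bdev that is backed by the given
 * nvme_ctrlr, or return NULL if that ctrlr does not contribute a namespace.
 */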
3477 static struct nvme_ns *
3478 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3479 {
3480 	struct nvme_ns *nvme_ns;
3481 
3482 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3483 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3484 			return nvme_ns;
3485 		}
3486 	}
3487 
3488 	return NULL;
3489 }
3490 
3491 static void
3492 test_add_multi_ns_to_bdev(void)
3493 {
3494 	struct nvme_path_id path1 = {}, path2 = {};
3495 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3496 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3497 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3498 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3499 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3500 	const int STRING_SIZE = 32;
3501 	const char *attached_names[STRING_SIZE];
3502 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3503 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3504 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3505 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3506 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3507 	int rc;
3508 
3509 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3510 	ut_init_trid(&path1.trid);
3511 	ut_init_trid2(&path2.trid);
3512 
3513 	/* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */
3514 
3515 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st, 3rd,
3516 	 * and 4th namespaces are populated.
3517 	 */
3518 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3519 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3520 
3521 	ctrlr1->ns[1].is_active = false;
3522 	ctrlr1->ns[4].is_active = false;
3523 	ctrlr1->ns[0].uuid = &uuid1;
3524 	ctrlr1->ns[2].uuid = &uuid3;
3525 	ctrlr1->ns[3].uuid = &uuid4;
3526 
3527 	g_ut_attach_ctrlr_status = 0;
3528 	g_ut_attach_bdev_count = 3;
3529 
3530 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3531 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3532 	CU_ASSERT(rc == 0);
3533 
3534 	spdk_delay_us(1000);
3535 	poll_threads();
3536 
3537 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3538 	poll_threads();
3539 
3540 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st, 2nd,
3541 	 * and 4th namespaces are populated. The uuid of the 4th namespace differs, and
3542 	 * hence adding the 4th namespace to the existing bdev should fail.
3543 	 */
3544 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3545 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3546 
3547 	ctrlr2->ns[2].is_active = false;
3548 	ctrlr2->ns[4].is_active = false;
3549 	ctrlr2->ns[0].uuid = &uuid1;
3550 	ctrlr2->ns[1].uuid = &uuid2;
3551 	ctrlr2->ns[3].uuid = &uuid44;
3552 
3553 	g_ut_attach_ctrlr_status = 0;
3554 	g_ut_attach_bdev_count = 2;
3555 
3556 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3557 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3558 	CU_ASSERT(rc == 0);
3559 
3560 	spdk_delay_us(1000);
3561 	poll_threads();
3562 
3563 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3564 	poll_threads();
3565 
3566 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3567 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3568 
3569 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3570 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3571 
3572 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3573 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3574 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3575 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3576 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3577 
3578 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3579 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3580 
3581 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3582 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3583 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3584 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3585 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3586 
3587 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3588 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3589 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3590 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3591 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3592 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3593 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3594 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3595 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3596 
3597 	CU_ASSERT(bdev1->ref == 2);
3598 	CU_ASSERT(bdev2->ref == 1);
3599 	CU_ASSERT(bdev3->ref == 1);
3600 	CU_ASSERT(bdev4->ref == 1);
3601 
3602 	/* Test if nvme_bdevs can be deleted by deleting ctrlrs one by one. */
3603 	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
3604 	CU_ASSERT(rc == 0);
3605 
3606 	poll_threads();
3607 	spdk_delay_us(1000);
3608 	poll_threads();
3609 
3610 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3611 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3612 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3613 
3614 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3615 	CU_ASSERT(rc == 0);
3616 
3617 	poll_threads();
3618 	spdk_delay_us(1000);
3619 	poll_threads();
3620 
3621 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3622 
3623 	/* Test if a nvme_bdev which has a shared namespace between two ctrlrs
3624 	 * can be deleted when the bdev subsystem shuts down.
3625 	 */
3626 	g_ut_attach_bdev_count = 1;
3627 
3628 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3629 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3630 
3631 	ctrlr1->ns[0].uuid = &uuid1;
3632 
3633 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3634 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3635 	CU_ASSERT(rc == 0);
3636 
3637 	spdk_delay_us(1000);
3638 	poll_threads();
3639 
3640 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3641 	poll_threads();
3642 
3643 	ut_init_trid2(&path2.trid);
3644 
3645 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3646 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3647 
3648 	ctrlr2->ns[0].uuid = &uuid1;
3649 
3650 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3651 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3652 	CU_ASSERT(rc == 0);
3653 
3654 	spdk_delay_us(1000);
3655 	poll_threads();
3656 
3657 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3658 	poll_threads();
3659 
3660 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3661 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3662 
3663 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3664 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3665 
3666 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3667 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3668 
3669 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3670 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3671 
3672 	/* Check that the nvme_bdev has two nvme_ns. */
3673 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3674 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3675 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3676 
3677 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3678 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3679 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3680 
3681 	/* Delete the nvme_bdev first, as is done when the bdev subsystem shuts down. */
3682 	bdev_nvme_destruct(&bdev1->disk);
3683 
3684 	poll_threads();
3685 
3686 	CU_ASSERT(nvme_ns1->bdev == NULL);
3687 	CU_ASSERT(nvme_ns2->bdev == NULL);
3688 
3689 	nvme_ctrlr1->destruct = true;
3690 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3691 
3692 	poll_threads();
3693 	spdk_delay_us(1000);
3694 	poll_threads();
3695 
3696 	nvme_ctrlr2->destruct = true;
3697 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3698 
3699 	poll_threads();
3700 	spdk_delay_us(1000);
3701 	poll_threads();
3702 
3703 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3704 }
3705 
3706 static void
3707 test_add_multi_io_paths_to_nbdev_ch(void)
3708 {
3709 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3710 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3711 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3712 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3713 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3714 	const int STRING_SIZE = 32;
3715 	const char *attached_names[STRING_SIZE];
3716 	struct nvme_bdev *bdev;
3717 	struct spdk_io_channel *ch;
3718 	struct nvme_bdev_channel *nbdev_ch;
3719 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3720 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3721 	int rc;
3722 
3723 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3724 	ut_init_trid(&path1.trid);
3725 	ut_init_trid2(&path2.trid);
3726 	ut_init_trid3(&path3.trid);
3727 	g_ut_attach_ctrlr_status = 0;
3728 	g_ut_attach_bdev_count = 1;
3729 
3730 	set_thread(1);
3731 
3732 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3733 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3734 
3735 	ctrlr1->ns[0].uuid = &uuid1;
3736 
3737 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3738 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3739 	CU_ASSERT(rc == 0);
3740 
3741 	spdk_delay_us(1000);
3742 	poll_threads();
3743 
3744 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3745 	poll_threads();
3746 
3747 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3748 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3749 
3750 	ctrlr2->ns[0].uuid = &uuid1;
3751 
3752 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3753 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3754 	CU_ASSERT(rc == 0);
3755 
3756 	spdk_delay_us(1000);
3757 	poll_threads();
3758 
3759 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3760 	poll_threads();
3761 
3762 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3763 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3764 
3765 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3766 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3767 
3768 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3769 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3770 
3771 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3772 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3773 
3774 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3775 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3776 
3777 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3778 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3779 
3780 	set_thread(0);
3781 
3782 	ch = spdk_get_io_channel(bdev);
3783 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3784 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3785 
3786 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3787 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3788 
3789 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3790 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3791 
3792 	set_thread(1);
3793 
3794 	/* Check if an I/O path is dynamically added to the nvme_bdev_channel. */
3795 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3796 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3797 
3798 	ctrlr3->ns[0].uuid = &uuid1;
3799 
3800 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3801 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3802 	CU_ASSERT(rc == 0);
3803 
3804 	spdk_delay_us(1000);
3805 	poll_threads();
3806 
3807 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3808 	poll_threads();
3809 
3810 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3811 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3812 
3813 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3814 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3815 
3816 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3817 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3818 
3819 	/* Check if an I/O path is dynamically deleted from the nvme_bdev_channel. */
3820 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
3821 	CU_ASSERT(rc == 0);
3822 
3823 	poll_threads();
3824 	spdk_delay_us(1000);
3825 	poll_threads();
3826 
3827 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3828 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3829 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3830 
3831 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3832 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3833 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3834 
3835 	set_thread(0);
3836 
3837 	spdk_put_io_channel(ch);
3838 
3839 	poll_threads();
3840 
3841 	set_thread(1);
3842 
3843 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3844 	CU_ASSERT(rc == 0);
3845 
3846 	poll_threads();
3847 	spdk_delay_us(1000);
3848 	poll_threads();
3849 
3850 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3851 }
3852 
3853 static void
3854 test_admin_path(void)
3855 {
3856 	struct nvme_path_id path1 = {}, path2 = {};
3857 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3858 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3859 	const int STRING_SIZE = 32;
3860 	const char *attached_names[STRING_SIZE];
3861 	struct nvme_bdev *bdev;
3862 	struct spdk_io_channel *ch;
3863 	struct spdk_bdev_io *bdev_io;
3864 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3865 	int rc;
3866 
3867 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3868 	ut_init_trid(&path1.trid);
3869 	ut_init_trid2(&path2.trid);
3870 	g_ut_attach_ctrlr_status = 0;
3871 	g_ut_attach_bdev_count = 1;
3872 
3873 	set_thread(0);
3874 
3875 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3876 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3877 
3878 	ctrlr1->ns[0].uuid = &uuid1;
3879 
3880 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3881 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3882 	CU_ASSERT(rc == 0);
3883 
3884 	spdk_delay_us(1000);
3885 	poll_threads();
3886 
3887 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3888 	poll_threads();
3889 
3890 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3891 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3892 
3893 	ctrlr2->ns[0].uuid = &uuid1;
3894 
3895 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3896 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3897 	CU_ASSERT(rc == 0);
3898 
3899 	spdk_delay_us(1000);
3900 	poll_threads();
3901 
3902 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3903 	poll_threads();
3904 
3905 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3906 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3907 
3908 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3909 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3910 
3911 	ch = spdk_get_io_channel(bdev);
3912 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3913 
3914 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3915 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3916 
3917 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
3918 	 * submitted to ctrlr2.
3919 	 */
3920 	ctrlr1->is_failed = true;
3921 	bdev_io->internal.in_submit_request = true;
3922 
3923 	bdev_nvme_submit_request(ch, bdev_io);
3924 
3925 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3926 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3927 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3928 
3929 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3930 	poll_threads();
3931 
3932 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3933 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3934 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3935 
3936 	/* Both ctrlr1 and ctrlr2 are failed. Submitting the admin command should fail. */
3937 	ctrlr2->is_failed = true;
3938 	bdev_io->internal.in_submit_request = true;
3939 
3940 	bdev_nvme_submit_request(ch, bdev_io);
3941 
3942 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3943 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3944 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3945 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3946 
3947 	free(bdev_io);
3948 
3949 	spdk_put_io_channel(ch);
3950 
3951 	poll_threads();
3952 
3953 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
3954 	CU_ASSERT(rc == 0);
3955 
3956 	poll_threads();
3957 	spdk_delay_us(1000);
3958 	poll_threads();
3959 
3960 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3961 }
3962 
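/* Find the I/O path in the given nvme_bdev_channel whose qpair belongs to
 * the given nvme_ctrlr, or return NULL if there is none.
 */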
3963 static struct nvme_io_path *
3964 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3965 			struct nvme_ctrlr *nvme_ctrlr)
3966 {
3967 	struct nvme_io_path *io_path;
3968 
3969 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3970 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
3971 			return io_path;
3972 		}
3973 	}
3974 
3975 	return NULL;
3976 }
3977 
3978 static void
3979 test_reset_bdev_ctrlr(void)
3980 {
3981 	struct nvme_path_id path1 = {}, path2 = {};
3982 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3983 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3984 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3985 	struct nvme_path_id *curr_path1, *curr_path2;
3986 	const int STRING_SIZE = 32;
3987 	const char *attached_names[STRING_SIZE];
3988 	struct nvme_bdev *bdev;
3989 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
3990 	struct nvme_bdev_io *first_bio;
3991 	struct spdk_io_channel *ch1, *ch2;
3992 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3993 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
3994 	int rc;
3995 
3996 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3997 	ut_init_trid(&path1.trid);
3998 	ut_init_trid2(&path2.trid);
3999 	g_ut_attach_ctrlr_status = 0;
4000 	g_ut_attach_bdev_count = 1;
4001 
4002 	set_thread(0);
4003 
4004 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4005 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4006 
4007 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4008 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4009 	CU_ASSERT(rc == 0);
4010 
4011 	spdk_delay_us(1000);
4012 	poll_threads();
4013 
4014 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4015 	poll_threads();
4016 
4017 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4018 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4019 
4020 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4021 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4022 	CU_ASSERT(rc == 0);
4023 
4024 	spdk_delay_us(1000);
4025 	poll_threads();
4026 
4027 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4028 	poll_threads();
4029 
4030 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4031 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4032 
4033 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4034 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
4035 
4036 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
4037 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
4038 
4039 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4040 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
4041 
4042 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
4043 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
4044 
4045 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4046 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4047 
4048 	set_thread(0);
4049 
4050 	ch1 = spdk_get_io_channel(bdev);
4051 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
4052 
4053 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
4054 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
4055 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
4056 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
4057 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
4058 
4059 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
4060 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
4061 
4062 	set_thread(1);
4063 
4064 	ch2 = spdk_get_io_channel(bdev);
4065 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
4066 
4067 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
4068 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
4069 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
4070 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
4071 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
4072 
4073 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
4074 
4075 	/* The first reset request from bdev_io is submitted on thread 0.
4076 	 * Check that ctrlr1 is reset first and then ctrlr2 is reset.
4077 	 *
4078 	 * A few extra polls are necessary after resetting ctrlr1 to check
4079 	 * the pending reset requests for ctrlr1.
4080 	 */
4081 	ctrlr1->is_failed = true;
4082 	curr_path1->last_failed_tsc = spdk_get_ticks();
4083 	ctrlr2->is_failed = true;
4084 	curr_path2->last_failed_tsc = spdk_get_ticks();
4085 
4086 	set_thread(0);
4087 
4088 	bdev_nvme_submit_request(ch1, first_bdev_io);
4089 	CU_ASSERT(first_bio->io_path == io_path11);
4090 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4091 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4092 
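	/* Each poll_thread_times() call below advances the reset state machine by a
	 * fixed number of poller iterations. The counts reflect the current reset
	 * sequence and may need adjusting if that sequence changes.
	 */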
4093 	poll_thread_times(0, 3);
4094 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4095 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4096 
4097 	poll_thread_times(1, 2);
4098 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4099 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4100 	CU_ASSERT(ctrlr1->is_failed == true);
4101 
4102 	poll_thread_times(0, 1);
4103 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4104 	CU_ASSERT(ctrlr1->is_failed == false);
4105 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4106 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4107 
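	/* Advancing past the adminq poll period lets ctrlr1 reconnect its adminq. */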
4108 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4109 	poll_thread_times(0, 2);
4110 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4111 
4112 	poll_thread_times(0, 1);
4113 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4114 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4115 
4116 	poll_thread_times(1, 1);
4117 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4118 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4119 
4120 	poll_thread_times(0, 2);
4121 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4122 	poll_thread_times(1, 1);
4123 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4124 	poll_thread_times(0, 2);
4125 	CU_ASSERT(nvme_ctrlr1->resetting == false);
4126 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4127 	CU_ASSERT(first_bio->io_path == io_path12);
4128 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4129 
4130 	poll_thread_times(0, 3);
4131 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4132 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4133 
4134 	poll_thread_times(1, 2);
4135 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4136 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4137 	CU_ASSERT(ctrlr2->is_failed == true);
4138 
4139 	poll_thread_times(0, 1);
4140 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4141 	CU_ASSERT(ctrlr2->is_failed == false);
4142 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4143 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4144 
4145 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4146 	poll_thread_times(0, 2);
4147 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4148 
4149 	poll_thread_times(0, 1);
4150 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4151 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4152 
4153 	poll_thread_times(1, 2);
4154 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4155 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4156 
4157 	poll_thread_times(0, 2);
4158 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4159 	poll_thread_times(1, 1);
4160 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4161 	poll_thread_times(0, 2);
4162 	CU_ASSERT(first_bio->io_path == NULL);
4163 	CU_ASSERT(nvme_ctrlr2->resetting == false);
4164 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4165 
4166 	poll_threads();
4167 
4168 	/* There is a race between two reset requests from bdev_io.
4169 	 *
4170 	 * The first reset request is submitted on thread 0, and the second reset
4171 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4172 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
4173 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
4174 	 * The second is pending on ctrlr2 again. After the first completes resetting
4175 	 * ctrlr2, both complete successfully.
4176 	 */
4177 	ctrlr1->is_failed = true;
4178 	curr_path1->last_failed_tsc = spdk_get_ticks();
4179 	ctrlr2->is_failed = true;
4180 	curr_path2->last_failed_tsc = spdk_get_ticks();
4181 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4182 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4183 
4184 	set_thread(0);
4185 
4186 	bdev_nvme_submit_request(ch1, first_bdev_io);
4187 
4188 	set_thread(1);
4189 
4190 	bdev_nvme_submit_request(ch2, second_bdev_io);
4191 
4192 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4193 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4194 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);
4195 
4196 	poll_threads();
4197 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4198 	poll_threads();
4199 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4200 	poll_threads();
4201 
4202 	CU_ASSERT(ctrlr1->is_failed == false);
4203 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4204 	CU_ASSERT(ctrlr2->is_failed == false);
4205 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4206 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4207 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4208 
4209 	set_thread(0);
4210 
4211 	spdk_put_io_channel(ch1);
4212 
4213 	set_thread(1);
4214 
4215 	spdk_put_io_channel(ch2);
4216 
4217 	poll_threads();
4218 
4219 	set_thread(0);
4220 
4221 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4222 	CU_ASSERT(rc == 0);
4223 
4224 	poll_threads();
4225 	spdk_delay_us(1000);
4226 	poll_threads();
4227 
4228 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4229 
4230 	free(first_bdev_io);
4231 	free(second_bdev_io);
4232 }
4233 
4234 static void
4235 test_find_io_path(void)
4236 {
4237 	struct nvme_bdev_channel nbdev_ch = {
4238 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4239 	};
4240 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4241 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4242 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4243 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4244 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4245 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4246 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
4247 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4248 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4249 
4250 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4251 
4252 	/* Test that an io_path whose ANA state is not accessible is excluded. */
4253 
4254 	nvme_qpair1.qpair = &qpair1;
4255 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4256 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4257 
4258 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4259 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4260 
4261 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4262 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4263 
4264 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4265 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4266 
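	/* bdev_nvme_find_io_path() caches the chosen path in nbdev_ch.current_io_path,
	 * so clear the cache before each new case.
	 */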
4267 	nbdev_ch.current_io_path = NULL;
4268 
4269 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4270 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4271 
4272 	nbdev_ch.current_io_path = NULL;
4273 
4274 	/* Test that an io_path whose qpair is resetting is excluded. */
4275 
4276 	nvme_qpair1.qpair = NULL;
4277 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4278 
4279 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4280 
4281 	/* Test that an ANA optimized path is preferred and that, otherwise, the
4282 	 * first found ANA non-optimized path is used.
4283 	 */
4284 
4285 	nvme_qpair1.qpair = &qpair1;
4286 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4287 	nvme_qpair2.qpair = &qpair2;
4288 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4289 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4290 
4291 	nbdev_ch.current_io_path = NULL;
4292 
4293 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4294 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4295 
4296 	nbdev_ch.current_io_path = NULL;
4297 }
4298 
4299 static void
4300 test_retry_io_if_ana_state_is_updating(void)
4301 {
4302 	struct nvme_path_id path = {};
4303 	struct nvme_ctrlr_opts opts = {};
4304 	struct spdk_nvme_ctrlr *ctrlr;
4305 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4306 	struct nvme_ctrlr *nvme_ctrlr;
4307 	const int STRING_SIZE = 32;
4308 	const char *attached_names[STRING_SIZE];
4309 	struct nvme_bdev *bdev;
4310 	struct nvme_ns *nvme_ns;
4311 	struct spdk_bdev_io *bdev_io1;
4312 	struct spdk_io_channel *ch;
4313 	struct nvme_bdev_channel *nbdev_ch;
4314 	struct nvme_io_path *io_path;
4315 	struct nvme_qpair *nvme_qpair;
4316 	int rc;
4317 
4318 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4319 	ut_init_trid(&path.trid);
4320 
4321 	set_thread(0);
4322 
4323 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4324 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4325 
4326 	g_ut_attach_ctrlr_status = 0;
4327 	g_ut_attach_bdev_count = 1;
4328 
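	/* Never give up on the ctrlr (-1) and attempt to reconnect every second. */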
4329 	opts.ctrlr_loss_timeout_sec = -1;
4330 	opts.reconnect_delay_sec = 1;
4331 
4332 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4333 			      attach_ctrlr_done, NULL, NULL, &opts, false);
4334 	CU_ASSERT(rc == 0);
4335 
4336 	spdk_delay_us(1000);
4337 	poll_threads();
4338 
4339 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4340 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4341 
4342 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4343 	CU_ASSERT(nvme_ctrlr != NULL);
4344 
4345 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4346 	CU_ASSERT(bdev != NULL);
4347 
4348 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4349 	CU_ASSERT(nvme_ns != NULL);
4350 
4351 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4352 	ut_bdev_io_set_buf(bdev_io1);
4353 
4354 	ch = spdk_get_io_channel(bdev);
4355 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4356 
4357 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4358 
4359 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4360 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4361 
4362 	nvme_qpair = io_path->qpair;
4363 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4364 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4365 
4366 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4367 
4368 	/* If qpair is connected, I/O should succeed. */
4369 	bdev_io1->internal.in_submit_request = true;
4370 
4371 	bdev_nvme_submit_request(ch, bdev_io1);
4372 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4373 
4374 	poll_threads();
4375 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4376 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4377 
4378 	/* If the ANA state of the namespace is inaccessible, the I/O should be queued. */
4379 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4380 	nbdev_ch->current_io_path = NULL;
4381 
4382 	bdev_io1->internal.in_submit_request = true;
4383 
4384 	bdev_nvme_submit_request(ch, bdev_io1);
4385 
4386 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4387 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4388 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4389 
4390 	/* ANA state became accessible while I/O was queued. */
4391 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4392 
4393 	spdk_delay_us(1000000);
4394 
4395 	poll_thread_times(0, 1);
4396 
4397 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4398 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4399 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4400 
4401 	poll_threads();
4402 
4403 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4404 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4405 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4406 
4407 	free(bdev_io1);
4408 
4409 	spdk_put_io_channel(ch);
4410 
4411 	poll_threads();
4412 
4413 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4414 	CU_ASSERT(rc == 0);
4415 
4416 	poll_threads();
4417 	spdk_delay_us(1000);
4418 	poll_threads();
4419 
4420 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4421 }
4422 
4423 static void
4424 test_retry_io_for_io_path_error(void)
4425 {
4426 	struct nvme_path_id path1 = {}, path2 = {};
4427 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4428 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4429 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4430 	const int STRING_SIZE = 32;
4431 	const char *attached_names[STRING_SIZE];
4432 	struct nvme_bdev *bdev;
4433 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4434 	struct spdk_bdev_io *bdev_io;
4435 	struct nvme_bdev_io *bio;
4436 	struct spdk_io_channel *ch;
4437 	struct nvme_bdev_channel *nbdev_ch;
4438 	struct nvme_io_path *io_path1, *io_path2;
4439 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4440 	struct ut_nvme_req *req;
4441 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4442 	int rc;
4443 
4444 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4445 	ut_init_trid(&path1.trid);
4446 	ut_init_trid2(&path2.trid);
4447 
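	/* Allow each failed I/O to be retried once. */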
4448 	g_opts.bdev_retry_count = 1;
4449 
4450 	set_thread(0);
4451 
4452 	g_ut_attach_ctrlr_status = 0;
4453 	g_ut_attach_bdev_count = 1;
4454 
4455 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4456 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4457 
4458 	ctrlr1->ns[0].uuid = &uuid1;
4459 
4460 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4461 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4462 	CU_ASSERT(rc == 0);
4463 
4464 	spdk_delay_us(1000);
4465 	poll_threads();
4466 
4467 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4468 	poll_threads();
4469 
4470 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4471 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4472 
4473 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4474 	CU_ASSERT(nvme_ctrlr1 != NULL);
4475 
4476 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4477 	CU_ASSERT(bdev != NULL);
4478 
4479 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4480 	CU_ASSERT(nvme_ns1 != NULL);
4481 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4482 
4483 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4484 	ut_bdev_io_set_buf(bdev_io);
4485 
4486 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4487 
4488 	ch = spdk_get_io_channel(bdev);
4489 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4490 
4491 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4492 
4493 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4494 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4495 
4496 	nvme_qpair1 = io_path1->qpair;
4497 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4498 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4499 
4500 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4501 
4502 	/* I/O gets a temporary I/O path error, but it should not be retried if DNR is set. */
4503 	bdev_io->internal.in_submit_request = true;
4504 
4505 	bdev_nvme_submit_request(ch, bdev_io);
4506 
4507 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4508 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4509 
4510 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4511 	SPDK_CU_ASSERT_FATAL(req != NULL);
4512 
4513 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4514 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4515 	req->cpl.status.dnr = 1;
4516 
4517 	poll_thread_times(0, 1);
4518 
4519 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4520 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4521 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4522 
4523 	/* I/O gets a temporary I/O path error, but it should succeed after a retry. */
4524 	bdev_io->internal.in_submit_request = true;
4525 
4526 	bdev_nvme_submit_request(ch, bdev_io);
4527 
4528 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4529 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4530 
4531 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4532 	SPDK_CU_ASSERT_FATAL(req != NULL);
4533 
4534 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4535 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4536 
4537 	poll_thread_times(0, 1);
4538 
4539 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4540 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4541 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4542 
4543 	poll_threads();
4544 
4545 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4546 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4547 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4548 
4549 	/* Add io_path2 dynamically, and create a multipath configuration. */
4550 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4551 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4552 
4553 	ctrlr2->ns[0].uuid = &uuid1;
4554 
4555 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4556 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4557 	CU_ASSERT(rc == 0);
4558 
4559 	spdk_delay_us(1000);
4560 	poll_threads();
4561 
4562 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4563 	poll_threads();
4564 
4565 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4566 	CU_ASSERT(nvme_ctrlr2 != NULL);
4567 
4568 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4569 	CU_ASSERT(nvme_ns2 != NULL);
4570 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4571 
4572 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4573 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4574 
4575 	nvme_qpair2 = io_path2->qpair;
4576 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4577 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4578 
4579 	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4580 	 * and deleted, so the I/O is aborted. io_path2 is still available, so
4581 	 * after a retry the I/O should be submitted to io_path2 and succeed.
4582 	 */
4583 	bdev_io->internal.in_submit_request = true;
4584 
4585 	bdev_nvme_submit_request(ch, bdev_io);
4586 
4587 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4588 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4589 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4590 
4591 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4592 	SPDK_CU_ASSERT_FATAL(req != NULL);
4593 
4594 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4595 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4596 
4597 	poll_thread_times(0, 1);
4598 
4599 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4600 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4601 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4602 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4603 
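	/* Simulate deletion of the disconnected qpair so that the retried I/O has to
	 * use io_path2.
	 */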
4604 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4605 	nvme_qpair1->qpair = NULL;
4606 
4607 	poll_threads();
4608 
4609 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4610 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4611 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4612 
4613 	free(bdev_io);
4614 
4615 	spdk_put_io_channel(ch);
4616 
4617 	poll_threads();
4618 
4619 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4620 	CU_ASSERT(rc == 0);
4621 
4622 	poll_threads();
4623 	spdk_delay_us(1000);
4624 	poll_threads();
4625 
4626 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4627 
4628 	g_opts.bdev_retry_count = 0;
4629 }
4630 
4631 static void
4632 test_retry_io_count(void)
4633 {
4634 	struct nvme_path_id path = {};
4635 	struct spdk_nvme_ctrlr *ctrlr;
4636 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4637 	struct nvme_ctrlr *nvme_ctrlr;
4638 	const int STRING_SIZE = 32;
4639 	const char *attached_names[STRING_SIZE];
4640 	struct nvme_bdev *bdev;
4641 	struct nvme_ns *nvme_ns;
4642 	struct spdk_bdev_io *bdev_io;
4643 	struct nvme_bdev_io *bio;
4644 	struct spdk_io_channel *ch;
4645 	struct nvme_bdev_channel *nbdev_ch;
4646 	struct nvme_io_path *io_path;
4647 	struct nvme_qpair *nvme_qpair;
4648 	struct ut_nvme_req *req;
4649 	int rc;
4650 
4651 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4652 	ut_init_trid(&path.trid);
4653 
4654 	set_thread(0);
4655 
4656 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4657 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4658 
4659 	g_ut_attach_ctrlr_status = 0;
4660 	g_ut_attach_bdev_count = 1;
4661 
4662 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4663 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4664 	CU_ASSERT(rc == 0);
4665 
4666 	spdk_delay_us(1000);
4667 	poll_threads();
4668 
4669 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4670 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4671 
4672 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4673 	CU_ASSERT(nvme_ctrlr != NULL);
4674 
4675 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4676 	CU_ASSERT(bdev != NULL);
4677 
4678 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4679 	CU_ASSERT(nvme_ns != NULL);
4680 
4681 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4682 	ut_bdev_io_set_buf(bdev_io);
4683 
4684 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4685 
4686 	ch = spdk_get_io_channel(bdev);
4687 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4688 
4689 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4690 
4691 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4692 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4693 
4694 	nvme_qpair = io_path->qpair;
4695 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4696 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4697 
4698 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4699 
4700 	/* If I/O is aborted by request, it should not be retried. */
4701 	g_opts.bdev_retry_count = 1;
4702 
4703 	bdev_io->internal.in_submit_request = true;
4704 
4705 	bdev_nvme_submit_request(ch, bdev_io);
4706 
4707 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4708 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4709 
4710 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4711 	SPDK_CU_ASSERT_FATAL(req != NULL);
4712 
4713 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4714 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4715 
4716 	poll_thread_times(0, 1);
4717 
4718 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4719 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4720 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4721 
4722 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4723 	 * the failed I/O should not be retried.
4724 	 */
4725 	g_opts.bdev_retry_count = 4;
4726 
4727 	bdev_io->internal.in_submit_request = true;
4728 
4729 	bdev_nvme_submit_request(ch, bdev_io);
4730 
4731 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4732 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4733 
4734 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4735 	SPDK_CU_ASSERT_FATAL(req != NULL);
4736 
4737 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4738 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4739 	bio->retry_count = 4;
4740 
4741 	poll_thread_times(0, 1);
4742 
4743 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4744 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4745 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4746 
4747 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
4748 	g_opts.bdev_retry_count = -1;
4749 
4750 	bdev_io->internal.in_submit_request = true;
4751 
4752 	bdev_nvme_submit_request(ch, bdev_io);
4753 
4754 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4755 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4756 
4757 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4758 	SPDK_CU_ASSERT_FATAL(req != NULL);
4759 
4760 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4761 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4762 	bio->retry_count = 4;
4763 
4764 	poll_thread_times(0, 1);
4765 
4766 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4767 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4768 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4769 
4770 	poll_threads();
4771 
4772 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4773 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4774 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4775 
4776 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4777 	 * the failed I/O should be retried.
4778 	 */
4779 	g_opts.bdev_retry_count = 4;
4780 
4781 	bdev_io->internal.in_submit_request = true;
4782 
4783 	bdev_nvme_submit_request(ch, bdev_io);
4784 
4785 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4786 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4787 
4788 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4789 	SPDK_CU_ASSERT_FATAL(req != NULL);
4790 
4791 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4792 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4793 	bio->retry_count = 3;
4794 
4795 	poll_thread_times(0, 1);
4796 
4797 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4798 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4799 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4800 
4801 	poll_threads();
4802 
4803 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4804 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4805 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4806 
4807 	free(bdev_io);
4808 
4809 	spdk_put_io_channel(ch);
4810 
4811 	poll_threads();
4812 
4813 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4814 	CU_ASSERT(rc == 0);
4815 
4816 	poll_threads();
4817 	spdk_delay_us(1000);
4818 	poll_threads();
4819 
4820 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4821 
4822 	g_opts.bdev_retry_count = 0;
4823 }
4824 
4825 static void
4826 test_concurrent_read_ana_log_page(void)
4827 {
4828 	struct spdk_nvme_transport_id trid = {};
4829 	struct spdk_nvme_ctrlr *ctrlr;
4830 	struct nvme_ctrlr *nvme_ctrlr;
4831 	const int STRING_SIZE = 32;
4832 	const char *attached_names[STRING_SIZE];
4833 	int rc;
4834 
4835 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4836 	ut_init_trid(&trid);
4837 
4838 	set_thread(0);
4839 
4840 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4841 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4842 
4843 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4844 
4845 	g_ut_attach_ctrlr_status = 0;
4846 	g_ut_attach_bdev_count = 1;
4847 
4848 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
4849 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4850 	CU_ASSERT(rc == 0);
4851 
4852 	spdk_delay_us(1000);
4853 	poll_threads();
4854 
4855 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4856 	poll_threads();
4857 
4858 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4859 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4860 
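	/* The first read should start updating the ANA log page and leave one admin
	 * request outstanding.
	 */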
4861 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4862 
4863 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4864 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4865 
4866 	/* Subsequent read requests should be rejected. */
4867 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4868 
4869 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4870 
4871 	set_thread(1);
4872 
4873 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4874 
4875 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4876 
4877 	/* A reset request issued while reading the ANA log page should not be rejected. */
4878 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
4879 	CU_ASSERT(rc == 0);
4880 
4881 	poll_threads();
4882 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4883 	poll_threads();
4884 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4885 	poll_threads();
4886 
4887 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4888 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4889 
4890 	/* Reading the ANA log page while the ctrlr is resetting should be rejected. */
4891 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
4892 	CU_ASSERT(rc == 0);
4893 
4894 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4895 
4896 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4897 
4898 	poll_threads();
4899 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4900 	poll_threads();
4901 
4902 	set_thread(0);
4903 
4904 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
4905 	CU_ASSERT(rc == 0);
4906 
4907 	poll_threads();
4908 	spdk_delay_us(1000);
4909 	poll_threads();
4910 
4911 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4912 }
4913 
4914 static void
4915 test_retry_io_for_ana_error(void)
4916 {
4917 	struct nvme_path_id path = {};
4918 	struct spdk_nvme_ctrlr *ctrlr;
4919 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4920 	struct nvme_ctrlr *nvme_ctrlr;
4921 	const int STRING_SIZE = 32;
4922 	const char *attached_names[STRING_SIZE];
4923 	struct nvme_bdev *bdev;
4924 	struct nvme_ns *nvme_ns;
4925 	struct spdk_bdev_io *bdev_io;
4926 	struct nvme_bdev_io *bio;
4927 	struct spdk_io_channel *ch;
4928 	struct nvme_bdev_channel *nbdev_ch;
4929 	struct nvme_io_path *io_path;
4930 	struct nvme_qpair *nvme_qpair;
4931 	struct ut_nvme_req *req;
4932 	uint64_t now;
4933 	int rc;
4934 
4935 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4936 	ut_init_trid(&path.trid);
4937 
4938 	g_opts.bdev_retry_count = 1;
4939 
4940 	set_thread(0);
4941 
4942 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
4943 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4944 
4945 	g_ut_attach_ctrlr_status = 0;
4946 	g_ut_attach_bdev_count = 1;
4947 
4948 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4949 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4950 	CU_ASSERT(rc == 0);
4951 
4952 	spdk_delay_us(1000);
4953 	poll_threads();
4954 
4955 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4956 	poll_threads();
4957 
4958 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4959 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4960 
4961 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4962 	CU_ASSERT(nvme_ctrlr != NULL);
4963 
4964 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4965 	CU_ASSERT(bdev != NULL);
4966 
4967 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4968 	CU_ASSERT(nvme_ns != NULL);
4969 
4970 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4971 	ut_bdev_io_set_buf(bdev_io);
4972 
4973 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4974 
4975 	ch = spdk_get_io_channel(bdev);
4976 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4977 
4978 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4979 
4980 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4981 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4982 
4983 	nvme_qpair = io_path->qpair;
4984 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4985 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4986 
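	/* Record the current tick count; bio->retry_ticks is compared against it below. */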
4987 	now = spdk_get_ticks();
4988 
4989 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4990 
4991 	/* If I/O gets an ANA error, it should be queued, the corresponding namespace
4992 	 * should be frozen, and its ANA state should be updated.
4993 	 */
4994 	bdev_io->internal.in_submit_request = true;
4995 
4996 	bdev_nvme_submit_request(ch, bdev_io);
4997 
4998 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4999 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5000 
5001 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
5002 	SPDK_CU_ASSERT_FATAL(req != NULL);
5003 
5004 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5005 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
5006 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
5007 
5008 	poll_thread_times(0, 1);
5009 
5010 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5011 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5012 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5013 	/* I/O should be retried immediately. */
5014 	CU_ASSERT(bio->retry_ticks == now);
5015 	CU_ASSERT(nvme_ns->ana_state_updating == true);
5016 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5017 
5018 	poll_threads();
5019 
5020 	/* Namespace is inaccessible, and hence I/O should be queued again. */
5021 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5022 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5023 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5024 	/* I/O should be retried after one second if no I/O path was found but
5025 	 * one may become available.
5026 	 */
5027 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
5028 
5029 	/* The namespace should be unfrozen after its ANA state update completes. */
5030 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5031 	poll_threads();
5032 
5033 	CU_ASSERT(nvme_ns->ana_state_updating == false);
5034 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5035 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5036 
5037 	/* Retrying the queued I/O should succeed. */
5038 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
5039 	poll_threads();
5040 
5041 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5042 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5043 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5044 
5045 	free(bdev_io);
5046 
5047 	spdk_put_io_channel(ch);
5048 
5049 	poll_threads();
5050 
5051 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5052 	CU_ASSERT(rc == 0);
5053 
5054 	poll_threads();
5055 	spdk_delay_us(1000);
5056 	poll_threads();
5057 
5058 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5059 
5060 	g_opts.bdev_retry_count = 0;
5061 }
5062 
5063 static void
5064 test_check_io_error_resiliency_params(void)
5065 {
5066 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5067 	 * 3rd parameter is fast_io_fail_timeout_sec.
5068 	 */
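	/* Summarizing the expectations encoded below: ctrlr_loss_timeout_sec must be
	 * >= -1; if it is non-zero, reconnect_delay_sec must be non-zero and must not
	 * exceed it (-1 meaning no limit); and if fast_io_fail_timeout_sec is non-zero,
	 * it must be at least reconnect_delay_sec and at most ctrlr_loss_timeout_sec.
	 */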
5069 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
5070 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
5071 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
5072 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
5073 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
5074 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
5075 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
5076 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
5077 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
5078 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
5079 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
5080 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
5081 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
5082 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
5083 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
5084 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
5085 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
5086 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5087 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
5088 }
5089 
5090 static void
5091 test_retry_io_if_ctrlr_is_resetting(void)
5092 {
5093 	struct nvme_path_id path = {};
5094 	struct nvme_ctrlr_opts opts = {};
5095 	struct spdk_nvme_ctrlr *ctrlr;
5096 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5097 	struct nvme_ctrlr *nvme_ctrlr;
5098 	const int STRING_SIZE = 32;
5099 	const char *attached_names[STRING_SIZE];
5100 	struct nvme_bdev *bdev;
5101 	struct nvme_ns *nvme_ns;
5102 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5103 	struct spdk_io_channel *ch;
5104 	struct nvme_bdev_channel *nbdev_ch;
5105 	struct nvme_io_path *io_path;
5106 	struct nvme_qpair *nvme_qpair;
5107 	int rc;
5108 
5109 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5110 	ut_init_trid(&path.trid);
5111 
5112 	set_thread(0);
5113 
5114 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5115 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5116 
5117 	g_ut_attach_ctrlr_status = 0;
5118 	g_ut_attach_bdev_count = 1;
5119 
5120 	opts.ctrlr_loss_timeout_sec = -1;
5121 	opts.reconnect_delay_sec = 1;
5122 
5123 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5124 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5125 	CU_ASSERT(rc == 0);
5126 
5127 	spdk_delay_us(1000);
5128 	poll_threads();
5129 
5130 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5131 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5132 
5133 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5134 	CU_ASSERT(nvme_ctrlr != NULL);
5135 
5136 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5137 	CU_ASSERT(bdev != NULL);
5138 
5139 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5140 	CU_ASSERT(nvme_ns != NULL);
5141 
5142 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5143 	ut_bdev_io_set_buf(bdev_io1);
5144 
5145 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5146 	ut_bdev_io_set_buf(bdev_io2);
5147 
5148 	ch = spdk_get_io_channel(bdev);
5149 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5150 
5151 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5152 
5153 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5154 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5155 
5156 	nvme_qpair = io_path->qpair;
5157 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5158 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5159 
5160 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5161 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5162 
5163 	/* If qpair is connected, I/O should succeed. */
5164 	bdev_io1->internal.in_submit_request = true;
5165 
5166 	bdev_nvme_submit_request(ch, bdev_io1);
5167 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5168 
5169 	poll_threads();
5170 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5172 
5173 	/* If qpair is disconnected, it is freed and then reconnected via resetting
5174 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
5175 	 * while resetting the nvme_ctrlr.
5176 	 */
5177 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5178 	ctrlr->is_failed = true;
5179 
5180 	poll_thread_times(0, 5);
5181 
5182 	CU_ASSERT(nvme_qpair->qpair == NULL);
5183 	CU_ASSERT(nvme_ctrlr->resetting == true);
5184 	CU_ASSERT(ctrlr->is_failed == false);
5185 
5186 	bdev_io1->internal.in_submit_request = true;
5187 
5188 	bdev_nvme_submit_request(ch, bdev_io1);
5189 
5190 	spdk_delay_us(1);
5191 
5192 	bdev_io2->internal.in_submit_request = true;
5193 
5194 	bdev_nvme_submit_request(ch, bdev_io2);
5195 
5196 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5197 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5198 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5199 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
5200 
5201 	poll_threads();
5202 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5203 	poll_threads();
5204 
5205 	CU_ASSERT(nvme_qpair->qpair != NULL);
5206 	CU_ASSERT(nvme_ctrlr->resetting == false);
5207 
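	/* Advance time so that only bdev_io1's one-second retry delay has elapsed.
	 * bdev_io2 was submitted 1 us after bdev_io1 and hence its retry should
	 * not be due yet.
	 */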
5208 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5209 
5210 	poll_thread_times(0, 1);
5211 
5212 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5213 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5214 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5215 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5216 
5217 	poll_threads();
5218 
5219 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5220 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5221 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5222 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5223 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5224 
5225 	spdk_delay_us(1);
5226 
5227 	poll_thread_times(0, 1);
5228 
5229 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5230 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5231 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5232 
5233 	poll_threads();
5234 
5235 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5236 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
5237 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5238 
5239 	free(bdev_io1);
5240 	free(bdev_io2);
5241 
5242 	spdk_put_io_channel(ch);
5243 
5244 	poll_threads();
5245 
5246 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5247 	CU_ASSERT(rc == 0);
5248 
5249 	poll_threads();
5250 	spdk_delay_us(1000);
5251 	poll_threads();
5252 
5253 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5254 }
5255 
5256 static void
5257 test_reconnect_ctrlr(void)
5258 {
5259 	struct spdk_nvme_transport_id trid = {};
5260 	struct spdk_nvme_ctrlr ctrlr = {};
5261 	struct nvme_ctrlr *nvme_ctrlr;
5262 	struct spdk_io_channel *ch1, *ch2;
5263 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5264 	int rc;
5265 
5266 	ut_init_trid(&trid);
5267 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5268 
5269 	set_thread(0);
5270 
5271 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5272 	CU_ASSERT(rc == 0);
5273 
5274 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5275 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5276 
5277 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5278 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
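
	/* With ctrlr_loss_timeout_sec = 2 and reconnect_delay_sec = 1, each failed
	 * reset schedules a reconnect retry after one second, and the ctrlr is
	 * deleted once reconnection keeps failing past the two-second loss
	 * timeout. The rest of this test walks through that timeline.
	 */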
5279 
5280 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5281 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5282 
5283 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5284 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5285 
5286 	set_thread(1);
5287 
5288 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5289 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5290 
5291 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5292 
5293 	/* Reset starts from thread 1. */
5294 	set_thread(1);
5295 
5296 	/* The reset should fail and a reconnect timer should be registered. */
5297 	ctrlr.fail_reset = true;
5298 	ctrlr.is_failed = true;
5299 
5300 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5301 	CU_ASSERT(rc == 0);
5302 	CU_ASSERT(nvme_ctrlr->resetting == true);
5303 	CU_ASSERT(ctrlr.is_failed == true);
5304 
5305 	poll_threads();
5306 
5307 	CU_ASSERT(nvme_ctrlr->resetting == false);
5308 	CU_ASSERT(ctrlr.is_failed == false);
5309 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5310 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5311 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5312 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5313 
	/* A new reset starts from thread 1. */
5315 	set_thread(1);
5316 
	/* The reset should cancel the reconnect timer and start reconnecting
	 * immediately. Then, the reset should fail and a reconnect timer should
	 * be registered again.
	 */
5320 	ctrlr.fail_reset = true;
5321 	ctrlr.is_failed = true;
5322 
5323 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5324 	CU_ASSERT(rc == 0);
5325 	CU_ASSERT(nvme_ctrlr->resetting == true);
5326 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5327 	CU_ASSERT(ctrlr.is_failed == true);
5328 
5329 	poll_threads();
5330 
5331 	CU_ASSERT(nvme_ctrlr->resetting == false);
5332 	CU_ASSERT(ctrlr.is_failed == false);
5333 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5334 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5335 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5336 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5337 
	/* Then a reconnect retry should succeed. */
5339 	ctrlr.fail_reset = false;
5340 
5341 	spdk_delay_us(SPDK_SEC_TO_USEC);
5342 	poll_thread_times(0, 1);
5343 
5344 	CU_ASSERT(nvme_ctrlr->resetting == true);
5345 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5346 
5347 	poll_threads();
5348 
5349 	CU_ASSERT(nvme_ctrlr->resetting == false);
5350 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5351 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5352 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5353 
5354 	/* The reset should fail and a reconnect timer should be registered. */
5355 	ctrlr.fail_reset = true;
5356 	ctrlr.is_failed = true;
5357 
5358 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5359 	CU_ASSERT(rc == 0);
5360 	CU_ASSERT(nvme_ctrlr->resetting == true);
5361 	CU_ASSERT(ctrlr.is_failed == true);
5362 
5363 	poll_threads();
5364 
5365 	CU_ASSERT(nvme_ctrlr->resetting == false);
5366 	CU_ASSERT(ctrlr.is_failed == false);
5367 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5368 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5369 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5370 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5371 
5372 	/* Then a reconnect retry should still fail. */
5373 	spdk_delay_us(SPDK_SEC_TO_USEC);
5374 	poll_thread_times(0, 1);
5375 
5376 	CU_ASSERT(nvme_ctrlr->resetting == true);
5377 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5378 
5379 	poll_threads();
5380 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5381 	poll_threads();
5382 
5383 	CU_ASSERT(nvme_ctrlr->resetting == false);
5384 	CU_ASSERT(ctrlr.is_failed == false);
5385 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5386 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5387 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5388 
5389 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5390 	spdk_delay_us(SPDK_SEC_TO_USEC);
5391 	poll_threads();
5392 
5393 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5394 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5395 	CU_ASSERT(nvme_ctrlr->destruct == true);
5396 
5397 	spdk_put_io_channel(ch2);
5398 
5399 	set_thread(0);
5400 
5401 	spdk_put_io_channel(ch1);
5402 
5403 	poll_threads();
5404 	spdk_delay_us(1000);
5405 	poll_threads();
5406 
5407 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5408 }
5409 
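/* Return the nvme_path_id in nvme_ctrlr->trids whose trid matches the given
 * trid, or NULL if no match is found.
 */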
5410 static struct nvme_path_id *
5411 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5412 		       const struct spdk_nvme_transport_id *trid)
5413 {
5414 	struct nvme_path_id *p;
5415 
5416 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5417 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5418 			break;
5419 		}
5420 	}
5421 
5422 	return p;
5423 }
5424 
5425 static void
5426 test_retry_failover_ctrlr(void)
5427 {
5428 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5429 	struct spdk_nvme_ctrlr ctrlr = {};
5430 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5431 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5432 	struct spdk_io_channel *ch;
5433 	struct nvme_ctrlr_channel *ctrlr_ch;
5434 	int rc;
5435 
5436 	ut_init_trid(&trid1);
5437 	ut_init_trid2(&trid2);
5438 	ut_init_trid3(&trid3);
5439 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5440 
5441 	set_thread(0);
5442 
5443 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5444 	CU_ASSERT(rc == 0);
5445 
5446 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5447 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5448 
5449 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5450 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5451 
5452 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5453 	CU_ASSERT(rc == 0);
5454 
5455 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5456 	CU_ASSERT(rc == 0);
5457 
5458 	ch = spdk_get_io_channel(nvme_ctrlr);
5459 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5460 
5461 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5462 
5463 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5464 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5465 	CU_ASSERT(path_id1->last_failed_tsc == 0);
5466 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5467 
	/* Even if reset fails and a reconnect is scheduled, the active path_id remains trid1. */
5469 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5470 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5471 
5472 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5473 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5474 
	/* It is expected that connecting to all of trid1, trid2, and trid3 fails,
	 * and a reconnect timer is started. */
5477 	ctrlr.fail_reset = true;
5478 	ctrlr.is_failed = true;
5479 
5480 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5481 	CU_ASSERT(rc == 0);
5482 
5483 	poll_threads();
5484 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5485 	poll_threads();
5486 
5487 	CU_ASSERT(nvme_ctrlr->resetting == false);
5488 	CU_ASSERT(ctrlr.is_failed == false);
5489 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5490 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5491 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5492 	CU_ASSERT(path_id1->last_failed_tsc != 0);
5493 
5494 	CU_ASSERT(path_id2->last_failed_tsc != 0);
5495 	CU_ASSERT(path_id3->last_failed_tsc != 0);
5496 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5497 
5498 	/* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is
5499 	 * switched to trid2 but reset is not started.
5500 	 */
5501 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, true);
5502 	CU_ASSERT(rc == 0);
5503 
5504 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL);
5505 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5506 
5507 	CU_ASSERT(nvme_ctrlr->resetting == false);
5508 
	/* If reconnect succeeds, trid2 should be the active path_id. */
5510 	ctrlr.fail_reset = false;
5511 
5512 	spdk_delay_us(SPDK_SEC_TO_USEC);
5513 	poll_thread_times(0, 1);
5514 
5515 	CU_ASSERT(nvme_ctrlr->resetting == true);
5516 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5517 
5518 	poll_threads();
5519 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5520 	poll_threads();
5521 
5522 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL);
5523 	CU_ASSERT(path_id2->last_failed_tsc == 0);
5524 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5525 	CU_ASSERT(nvme_ctrlr->resetting == false);
5526 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5527 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5528 
5529 	spdk_put_io_channel(ch);
5530 
5531 	poll_threads();
5532 
5533 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5534 	CU_ASSERT(rc == 0);
5535 
5536 	poll_threads();
5537 	spdk_delay_us(1000);
5538 	poll_threads();
5539 
5540 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5541 }
5542 
5543 static void
5544 test_fail_path(void)
5545 {
5546 	struct nvme_path_id path = {};
5547 	struct nvme_ctrlr_opts opts = {};
5548 	struct spdk_nvme_ctrlr *ctrlr;
5549 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5550 	struct nvme_ctrlr *nvme_ctrlr;
5551 	const int STRING_SIZE = 32;
5552 	const char *attached_names[STRING_SIZE];
5553 	struct nvme_bdev *bdev;
5554 	struct nvme_ns *nvme_ns;
5555 	struct spdk_bdev_io *bdev_io;
5556 	struct spdk_io_channel *ch;
5557 	struct nvme_bdev_channel *nbdev_ch;
5558 	struct nvme_io_path *io_path;
5559 	struct nvme_ctrlr_channel *ctrlr_ch;
5560 	int rc;
5561 
5562 	/* The test scenario is the following.
	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
5565 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
	 *   expires first. The queued I/O is failed.
	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
5569 	 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted.
5570 	 */
5571 
5572 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5573 	ut_init_trid(&path.trid);
5574 
5575 	set_thread(0);
5576 
5577 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5578 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5579 
5580 	g_ut_attach_ctrlr_status = 0;
5581 	g_ut_attach_bdev_count = 1;
5582 
5583 	opts.ctrlr_loss_timeout_sec = 4;
5584 	opts.reconnect_delay_sec = 1;
5585 	opts.fast_io_fail_timeout_sec = 2;
5586 
5587 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5588 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5589 	CU_ASSERT(rc == 0);
5590 
5591 	spdk_delay_us(1000);
5592 	poll_threads();
5593 
5594 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5595 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5596 
5597 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5598 	CU_ASSERT(nvme_ctrlr != NULL);
5599 
5600 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5601 	CU_ASSERT(bdev != NULL);
5602 
5603 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5604 	CU_ASSERT(nvme_ns != NULL);
5605 
5606 	ch = spdk_get_io_channel(bdev);
5607 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5608 
5609 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5610 
5611 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5612 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5613 
5614 	ctrlr_ch = io_path->qpair->ctrlr_ch;
5615 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5616 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
5617 
5618 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
	ut_bdev_io_set_buf(bdev_io);

5622 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
5623 	ctrlr->fail_reset = true;
5624 	ctrlr->is_failed = true;
5625 
5626 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5627 	CU_ASSERT(rc == 0);
5628 	CU_ASSERT(nvme_ctrlr->resetting == true);
5629 	CU_ASSERT(ctrlr->is_failed == true);
5630 
5631 	poll_threads();
5632 
5633 	CU_ASSERT(nvme_ctrlr->resetting == false);
5634 	CU_ASSERT(ctrlr->is_failed == false);
5635 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5636 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5637 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
5638 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5639 
5640 	/* I/O should be queued. */
5641 	bdev_io->internal.in_submit_request = true;
5642 
5643 	bdev_nvme_submit_request(ch, bdev_io);
5644 
5645 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5646 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5647 
	/* After a second, the I/O should still be queued and the ctrlr should
	 * still be recovering.
	 */
5651 	spdk_delay_us(SPDK_SEC_TO_USEC);
5652 	poll_threads();
5653 
5654 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5655 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5656 
5657 	CU_ASSERT(nvme_ctrlr->resetting == false);
5658 	CU_ASSERT(ctrlr->is_failed == false);
5659 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5660 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5661 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5665 
	/* After two seconds, fast_io_fail_timeout_sec should expire. */
5667 	spdk_delay_us(SPDK_SEC_TO_USEC);
5668 	poll_threads();
5669 
5670 	CU_ASSERT(nvme_ctrlr->resetting == false);
5671 	CU_ASSERT(ctrlr->is_failed == false);
5672 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5673 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5674 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5675 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
5676 
5677 	/* Then within a second, pending I/O should be failed. */
5678 	spdk_delay_us(SPDK_SEC_TO_USEC);
5679 	poll_threads();
5680 
5681 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5682 	poll_threads();
5683 
5684 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5685 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5686 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5687 
5688 	/* Another I/O submission should be failed immediately. */
5689 	bdev_io->internal.in_submit_request = true;
5690 
5691 	bdev_nvme_submit_request(ch, bdev_io);
5692 
5693 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5694 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5695 
	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr
	 * should be deleted.
	 */
5699 	spdk_delay_us(SPDK_SEC_TO_USEC);
5700 	poll_threads();
5701 
5702 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5703 	poll_threads();
5704 
5705 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5706 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5707 	CU_ASSERT(nvme_ctrlr->destruct == true);
5708 
5709 	spdk_put_io_channel(ch);
5710 
5711 	poll_threads();
5712 	spdk_delay_us(1000);
5713 	poll_threads();
5714 
5715 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5716 
5717 	free(bdev_io);
5718 }
5719 
5720 static void
5721 test_nvme_ns_cmp(void)
5722 {
5723 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
5724 
5725 	nvme_ns1.id = 0;
5726 	nvme_ns2.id = UINT32_MAX;
5727 
5728 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
5729 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
5730 }
5731 
5732 static void
5733 test_ana_transition(void)
5734 {
5735 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
5736 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
5737 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
5738 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
5739 
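	/* cdata.anatt is the ANA transition time in seconds reported by the ctrlr.
	 * While a namespace is in the ANA change state, an ANATT timer runs; if
	 * the state does not settle within anatt seconds, the namespace is marked
	 * ana_transition_timedout. The cases below exercise each branch.
	 */
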
	/* case 1: the ANA transition timedout flag is cleared. */
5741 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5742 	nvme_ns.ana_transition_timedout = true;
5743 
5744 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5745 
5746 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5747 
5748 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
5749 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5750 
5751 	/* case 2: ANATT timer is kept. */
5752 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5753 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
5754 			      &nvme_ns,
5755 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5756 
5757 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5758 
5759 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5760 
5761 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5762 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
5763 
5764 	/* case 3: ANATT timer is stopped. */
5765 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5766 
5767 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5768 
5769 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5770 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5771 
	/* case 4: ANATT timer is started. */
5773 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5774 
5775 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5776 
5777 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5778 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
5779 
	/* case 5: ANATT timer is expired. */
5781 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5782 
5783 	poll_threads();
5784 
5785 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5786 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
5787 }
5788 
5789 static void
5790 _set_preferred_path_cb(void *cb_arg, int rc)
5791 {
5792 	bool *done = cb_arg;
5793 
5794 	*done = true;
5795 }
5796 
5797 static void
5798 test_set_preferred_path(void)
5799 {
5800 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
5801 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
5802 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5803 	const int STRING_SIZE = 32;
5804 	const char *attached_names[STRING_SIZE];
5805 	struct nvme_bdev *bdev;
5806 	struct spdk_io_channel *ch;
5807 	struct nvme_bdev_channel *nbdev_ch;
5808 	struct nvme_io_path *io_path;
5809 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
5810 	const struct spdk_nvme_ctrlr_data *cdata;
5811 	bool done;
5812 	int rc;
5813 
5814 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5815 	ut_init_trid(&path1.trid);
5816 	ut_init_trid2(&path2.trid);
5817 	ut_init_trid3(&path3.trid);
5818 	g_ut_attach_ctrlr_status = 0;
5819 	g_ut_attach_bdev_count = 1;
5820 
5821 	set_thread(0);
5822 
5823 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
5824 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
5825 
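	/* Assign the same namespace UUID to all three ctrlrs so that their
	 * namespaces are aggregated into a single multipath bdev.
	 */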
5826 	ctrlr1->ns[0].uuid = &uuid1;
5827 
5828 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
5829 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5830 	CU_ASSERT(rc == 0);
5831 
5832 	spdk_delay_us(1000);
5833 	poll_threads();
5834 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5835 	poll_threads();
5836 
5837 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
5838 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
5839 
5840 	ctrlr2->ns[0].uuid = &uuid1;
5841 
5842 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
5843 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5844 	CU_ASSERT(rc == 0);
5845 
5846 	spdk_delay_us(1000);
5847 	poll_threads();
5848 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5849 	poll_threads();
5850 
5851 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
5852 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
5853 
5854 	ctrlr3->ns[0].uuid = &uuid1;
5855 
5856 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
5857 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5858 	CU_ASSERT(rc == 0);
5859 
5860 	spdk_delay_us(1000);
5861 	poll_threads();
5862 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5863 	poll_threads();
5864 
5865 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5866 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5867 
5868 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5869 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
5870 
5871 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
5872 
5873 	ch = spdk_get_io_channel(bdev);
5874 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5875 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5876 
5877 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5878 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5879 
5880 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
5881 
	/* If the io_path to ctrlr2 is set as the preferred path dynamically,
	 * find_io_path() should return the io_path to ctrlr2.
	 */
5885 
5886 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
5887 	done = false;
5888 
5889 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5890 
5891 	poll_threads();
5892 	CU_ASSERT(done == true);
5893 
5894 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5895 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5896 
5897 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
5898 
	/* If the io_path to ctrlr3 is set as the preferred path and then a new
	 * I/O channel is acquired, find_io_path() should return the io_path to
	 * ctrlr3.
	 */
5902 
5903 	spdk_put_io_channel(ch);
5904 
5905 	poll_threads();
5906 
5907 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
5908 	done = false;
5909 
5910 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5911 
5912 	poll_threads();
5913 	CU_ASSERT(done == true);
5914 
5915 	ch = spdk_get_io_channel(bdev);
5916 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5917 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5918 
5919 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5920 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5921 
5922 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
5923 
5924 	spdk_put_io_channel(ch);
5925 
5926 	poll_threads();
5927 
5928 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
5929 	CU_ASSERT(rc == 0);
5930 
5931 	poll_threads();
5932 	spdk_delay_us(1000);
5933 	poll_threads();
5934 
5935 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5936 }
5937 
5938 static void
5939 test_find_next_io_path(void)
5940 {
5941 	struct nvme_bdev_channel nbdev_ch = {
5942 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
5943 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
5944 		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
5945 	};
5946 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
5947 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
5948 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
5949 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
5950 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
5951 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
5952 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
5953 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
5954 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
5955 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
5956 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
5957 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
5958 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
5959 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
5960 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
5961 
5962 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
5963 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
5964 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
5965 
	/* Test the case where nbdev_ch->current_io_path is filled. The case of
	 * current_io_path == NULL is covered in test_find_io_path.
	 */
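	/* With the round-robin selector, the search starts at the io_path next to
	 * the cached current_io_path and wraps around the list. An ANA optimized
	 * path is preferred; otherwise the first accessible path found is used.
	 */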
5969 
5970 	nbdev_ch.current_io_path = &io_path2;
5971 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5972 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5973 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5974 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5975 
5976 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5977 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5978 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5979 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5980 
5981 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5982 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5983 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5984 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5985 
5986 	nbdev_ch.current_io_path = &io_path3;
5987 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5988 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5989 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5990 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5991 
5992 	/* Test if next io_path is selected according to rr_min_io */
5993 
5994 	nbdev_ch.current_io_path = NULL;
5995 	nbdev_ch.rr_min_io = 2;
5996 	nbdev_ch.rr_counter = 0;
5997 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5998 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5999 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6000 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6001 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6002 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6003 
6004 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6005 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6006 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6007 }
6008 
6009 static void
6010 test_find_io_path_min_qd(void)
6011 {
6012 	struct nvme_bdev_channel nbdev_ch = {
6013 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6014 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6015 		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
6016 	};
6017 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6018 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6019 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6020 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6021 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6022 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6023 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6024 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6025 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6026 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6027 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6028 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
6029 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6030 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6031 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6032 
6033 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6034 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6035 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6036 
	/* Test that the minimum number of outstanding requests or the ANA
	 * optimized state is prioritized when using the least queue depth
	 * selector.
	 */
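	/* As the assertions below show, an ANA optimized path is always preferred
	 * over a non-optimized one, and among paths in the same ANA state the
	 * qpair with the fewest outstanding requests wins.
	 */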
6040 	qpair1.num_outstanding_reqs = 2;
6041 	qpair2.num_outstanding_reqs = 1;
6042 	qpair3.num_outstanding_reqs = 0;
6043 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6044 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6045 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6046 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6047 
6048 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6049 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6050 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6051 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6052 
6053 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6054 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6055 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6056 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6057 
6058 	qpair2.num_outstanding_reqs = 4;
6059 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6060 }
6061 
6062 static void
6063 test_disable_auto_failback(void)
6064 {
6065 	struct nvme_path_id path1 = {}, path2 = {};
6066 	struct nvme_ctrlr_opts opts = {};
6067 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6068 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6069 	struct nvme_ctrlr *nvme_ctrlr1;
6070 	const int STRING_SIZE = 32;
6071 	const char *attached_names[STRING_SIZE];
6072 	struct nvme_bdev *bdev;
6073 	struct spdk_io_channel *ch;
6074 	struct nvme_bdev_channel *nbdev_ch;
6075 	struct nvme_io_path *io_path;
6076 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6077 	const struct spdk_nvme_ctrlr_data *cdata;
6078 	bool done;
6079 	int rc;
6080 
6081 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6082 	ut_init_trid(&path1.trid);
6083 	ut_init_trid2(&path2.trid);
6084 	g_ut_attach_ctrlr_status = 0;
6085 	g_ut_attach_bdev_count = 1;
6086 
6087 	g_opts.disable_auto_failback = true;
6088 
6089 	opts.ctrlr_loss_timeout_sec = -1;
6090 	opts.reconnect_delay_sec = 1;
6091 
6092 	set_thread(0);
6093 
6094 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6095 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6096 
6097 	ctrlr1->ns[0].uuid = &uuid1;
6098 
6099 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6100 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6101 	CU_ASSERT(rc == 0);
6102 
6103 	spdk_delay_us(1000);
6104 	poll_threads();
6105 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6106 	poll_threads();
6107 
6108 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6109 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6110 
6111 	ctrlr2->ns[0].uuid = &uuid1;
6112 
6113 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6114 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6115 	CU_ASSERT(rc == 0);
6116 
6117 	spdk_delay_us(1000);
6118 	poll_threads();
6119 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6120 	poll_threads();
6121 
6122 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6123 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6124 
6125 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6126 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6127 
6128 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
6129 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6130 
6131 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6132 
6133 	ch = spdk_get_io_channel(bdev);
6134 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6135 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6136 
6137 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6138 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6139 
6140 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6141 
6142 	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
6143 	ctrlr1->fail_reset = true;
6144 	ctrlr1->is_failed = true;
6145 
6146 	bdev_nvme_reset_ctrlr(nvme_ctrlr1);
6147 
6148 	poll_threads();
6149 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6150 	poll_threads();
6151 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6152 	poll_threads();
6153 
6154 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6155 
6156 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6157 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6158 
6159 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6160 
6161 	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
6162 	 * Hence, io_path to ctrlr2 should still be used.
6163 	 */
6164 	ctrlr1->fail_reset = false;
6165 
6166 	spdk_delay_us(SPDK_SEC_TO_USEC);
6167 	poll_threads();
6168 
6169 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6170 
6171 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6172 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6173 
6174 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6175 
	/* Explicitly set the io_path to ctrlr1 as the preferred path. Then the
	 * io_path to ctrlr1 should be used again.
	 */
6179 
6180 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6181 	done = false;
6182 
6183 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6184 
6185 	poll_threads();
6186 	CU_ASSERT(done == true);
6187 
6188 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6189 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6190 
6191 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6192 
6193 	spdk_put_io_channel(ch);
6194 
6195 	poll_threads();
6196 
6197 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6198 	CU_ASSERT(rc == 0);
6199 
6200 	poll_threads();
6201 	spdk_delay_us(1000);
6202 	poll_threads();
6203 
6204 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6205 
6206 	g_opts.disable_auto_failback = false;
6207 }
6208 
6209 static void
6210 ut_set_multipath_policy_done(void *cb_arg, int rc)
6211 {
6212 	int *done = cb_arg;
6213 
6214 	SPDK_CU_ASSERT_FATAL(done != NULL);
6215 	*done = rc;
6216 }
6217 
6218 static void
6219 test_set_multipath_policy(void)
6220 {
6221 	struct nvme_path_id path1 = {}, path2 = {};
6222 	struct nvme_ctrlr_opts opts = {};
6223 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6224 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6225 	const int STRING_SIZE = 32;
6226 	const char *attached_names[STRING_SIZE];
6227 	struct nvme_bdev *bdev;
6228 	struct spdk_io_channel *ch;
6229 	struct nvme_bdev_channel *nbdev_ch;
6230 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6231 	int done;
6232 	int rc;
6233 
6234 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6235 	ut_init_trid(&path1.trid);
6236 	ut_init_trid2(&path2.trid);
6237 	g_ut_attach_ctrlr_status = 0;
6238 	g_ut_attach_bdev_count = 1;
6239 
6240 	g_opts.disable_auto_failback = true;
6241 
6242 	opts.ctrlr_loss_timeout_sec = -1;
6243 	opts.reconnect_delay_sec = 1;
6244 
6245 	set_thread(0);
6246 
6247 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6248 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6249 
6250 	ctrlr1->ns[0].uuid = &uuid1;
6251 
6252 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6253 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6254 	CU_ASSERT(rc == 0);
6255 
6256 	spdk_delay_us(1000);
6257 	poll_threads();
6258 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6259 	poll_threads();
6260 
6261 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6262 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6263 
6264 	ctrlr2->ns[0].uuid = &uuid1;
6265 
6266 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6267 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6268 	CU_ASSERT(rc == 0);
6269 
6270 	spdk_delay_us(1000);
6271 	poll_threads();
6272 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6273 	poll_threads();
6274 
6275 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6276 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6277 
6278 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6279 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6280 
	/* If the multipath policy is updated before getting any I/O channel,
	 * a newly acquired I/O channel should reflect the update.
	 */
6284 	done = -1;
6285 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6286 				       BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX,
6287 				       ut_set_multipath_policy_done, &done);
6288 	poll_threads();
6289 	CU_ASSERT(done == 0);
6290 
6291 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6292 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6293 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6294 
6295 	ch = spdk_get_io_channel(bdev);
6296 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6297 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6298 
6299 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6300 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6301 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6302 
	/* If the multipath policy is updated while an I/O channel is active,
6304 	 * the update should be applied to the I/O channel immediately.
6305 	 */
6306 	done = -1;
6307 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6308 				       BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX,
6309 				       ut_set_multipath_policy_done, &done);
6310 	poll_threads();
6311 	CU_ASSERT(done == 0);
6312 
6313 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6314 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6315 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6316 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6317 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6318 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6319 
6320 	spdk_put_io_channel(ch);
6321 
6322 	poll_threads();
6323 
6324 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6325 	CU_ASSERT(rc == 0);
6326 
6327 	poll_threads();
6328 	spdk_delay_us(1000);
6329 	poll_threads();
6330 
6331 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6332 }
6333 
6334 static void
6335 test_uuid_generation(void)
6336 {
6337 	uint32_t nsid1 = 1, nsid2 = 2;
6338 	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
6339 	char sn3[21] = "                    ";
6340 	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
6341 	struct spdk_uuid uuid1, uuid2;
6342 
6343 	/* Test case 1:
6344 	 * Serial numbers are the same, nsids are different.
	 * Compare the two generated UUIDs - they should be different. */
6346 	uuid1 = nvme_generate_uuid(sn1, nsid1);
6347 	uuid2 = nvme_generate_uuid(sn1, nsid2);
6348 
6349 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6350 
6351 	/* Test case 2:
6352 	 * Serial numbers differ only by one character, nsids are the same.
	 * Compare the two generated UUIDs - they should be different. */
6354 	uuid1 = nvme_generate_uuid(sn1, nsid1);
6355 	uuid2 = nvme_generate_uuid(sn2, nsid1);
6356 
6357 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6358 
6359 	/* Test case 3:
	 * Serial number consists only of space characters.
6361 	 * Validate the generated UUID. */
6362 	uuid1 = nvme_generate_uuid(sn3, nsid1);
6363 	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
6364 }
6365 
6366 static void
6367 test_retry_io_to_same_path(void)
6368 {
6369 	struct nvme_path_id path1 = {}, path2 = {};
6370 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6371 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6372 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
6373 	const int STRING_SIZE = 32;
6374 	const char *attached_names[STRING_SIZE];
6375 	struct nvme_bdev *bdev;
6376 	struct spdk_bdev_io *bdev_io;
6377 	struct nvme_bdev_io *bio;
6378 	struct spdk_io_channel *ch;
6379 	struct nvme_bdev_channel *nbdev_ch;
6380 	struct nvme_io_path *io_path1, *io_path2;
6381 	struct ut_nvme_req *req;
6382 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6383 	int done;
6384 	int rc;
6385 
6386 	g_opts.nvme_ioq_poll_period_us = 1;
6387 
6388 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6389 	ut_init_trid(&path1.trid);
6390 	ut_init_trid2(&path2.trid);
6391 	g_ut_attach_ctrlr_status = 0;
6392 	g_ut_attach_bdev_count = 1;
6393 
6394 	set_thread(0);
6395 
6396 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6397 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6398 
6399 	ctrlr1->ns[0].uuid = &uuid1;
6400 
6401 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6402 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6403 	CU_ASSERT(rc == 0);
6404 
6405 	spdk_delay_us(1000);
6406 	poll_threads();
6407 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6408 	poll_threads();
6409 
6410 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6411 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6412 
6413 	ctrlr2->ns[0].uuid = &uuid1;
6414 
6415 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6416 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6417 	CU_ASSERT(rc == 0);
6418 
6419 	spdk_delay_us(1000);
6420 	poll_threads();
6421 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6422 	poll_threads();
6423 
6424 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6425 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6426 
6427 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
6428 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6429 
6430 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
6431 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6432 
6433 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6434 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6435 
6436 	done = -1;
6437 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6438 				       BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
6439 	poll_threads();
6440 	CU_ASSERT(done == 0);
6441 
6442 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6443 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6444 	CU_ASSERT(bdev->rr_min_io == 1);
6445 
6446 	ch = spdk_get_io_channel(bdev);
6447 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6448 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6449 
6450 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6451 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6452 	CU_ASSERT(nbdev_ch->rr_min_io == 1);
6453 
6454 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6455 	ut_bdev_io_set_buf(bdev_io);
6456 
6457 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
6458 
6459 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
6460 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
6461 
6462 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
6463 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
6464 
6465 	/* The 1st I/O should be submitted to io_path1. */
6466 	bdev_io->internal.in_submit_request = true;
6467 
6468 	bdev_nvme_submit_request(ch, bdev_io);
6469 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6470 	CU_ASSERT(bio->io_path == io_path1);
6471 	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);
6472 
6473 	spdk_delay_us(1);
6474 
6475 	poll_threads();
6476 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6478 
6479 	/* The 2nd I/O should be submitted to io_path2 because the path selection
6480 	 * policy is round-robin.
6481 	 */
6482 	bdev_io->internal.in_submit_request = true;
6483 
6484 	bdev_nvme_submit_request(ch, bdev_io);
6485 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6486 	CU_ASSERT(bio->io_path == io_path2);
6487 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6488 
6489 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6490 	SPDK_CU_ASSERT_FATAL(req != NULL);
6491 
6492 	/* Set retry count to non-zero. */
6493 	g_opts.bdev_retry_count = 2;
6494 
6495 	/* Inject an I/O error. */
6496 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6497 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6498 
6499 	/* The 2nd I/O should be queued to nbdev_ch. */
6500 	spdk_delay_us(1);
6501 	poll_thread_times(0, 1);
6502 
6503 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6504 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6505 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
6506 
	/* The 2nd I/O should keep io_path2 cached. */
6508 	CU_ASSERT(bio->io_path == io_path2);
6509 
6510 	/* The 2nd I/O should be submitted to io_path2 again. */
6511 	poll_thread_times(0, 1);
6512 
6513 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6514 	CU_ASSERT(bio->io_path == io_path2);
6515 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6516 
6517 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6518 	SPDK_CU_ASSERT_FATAL(req != NULL);
6519 
6520 	/* Inject an I/O error again. */
6521 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6522 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6523 	req->cpl.status.crd = 1;
6524 
6525 	ctrlr2->cdata.crdt[1] = 1;
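	/* crd = 1 selects CRDT1 and the ctrlr reports CRDT1 = 1. CRDT values are
	 * in units of 100 milliseconds, so the retry should be delayed by about
	 * 100 msec; the spdk_delay_us(100000) further below accounts for this.
	 */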
6526 
6527 	/* The 2nd I/O should be queued to nbdev_ch. */
6528 	spdk_delay_us(1);
6529 	poll_thread_times(0, 1);
6530 
6531 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6532 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6533 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
6534 
	/* The 2nd I/O should keep io_path2 cached. */
6536 	CU_ASSERT(bio->io_path == io_path2);
6537 
6538 	/* Detach ctrlr2 dynamically. */
6539 	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
6540 	CU_ASSERT(rc == 0);
6541 
6542 	spdk_delay_us(1000);
6543 	poll_threads();
6544 	spdk_delay_us(1000);
6545 	poll_threads();
6546 	spdk_delay_us(1000);
6547 	poll_threads();
6548 	spdk_delay_us(1000);
6549 	poll_threads();
6550 
6551 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
6552 
6553 	poll_threads();
6554 	spdk_delay_us(100000);
6555 	poll_threads();
6556 	spdk_delay_us(1);
6557 	poll_threads();
6558 
	/* The 2nd I/O should succeed via io_path1. */
6560 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6562 	CU_ASSERT(bio->io_path == io_path1);
6563 
6564 	free(bdev_io);
6565 
6566 	spdk_put_io_channel(ch);
6567 
6568 	poll_threads();
6569 	spdk_delay_us(1);
6570 	poll_threads();
6571 
6572 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6573 	CU_ASSERT(rc == 0);
6574 
6575 	poll_threads();
6576 	spdk_delay_us(1000);
6577 	poll_threads();
6578 
6579 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
6580 
6581 	g_opts.nvme_ioq_poll_period_us = 0;
6582 	g_opts.bdev_retry_count = 0;
6583 }
6584 
/* This case verifies a fix for a complex race condition where failover is
 * lost if the fabric connect command times out while the controller is
 * being reset.
6588  */
6589 static void
6590 test_race_between_reset_and_disconnected(void)
6591 {
6592 	struct spdk_nvme_transport_id trid = {};
6593 	struct spdk_nvme_ctrlr ctrlr = {};
6594 	struct nvme_ctrlr *nvme_ctrlr = NULL;
6595 	struct nvme_path_id *curr_trid;
6596 	struct spdk_io_channel *ch1, *ch2;
6597 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
6598 	int rc;
6599 
6600 	ut_init_trid(&trid);
6601 	TAILQ_INIT(&ctrlr.active_io_qpairs);
6602 
6603 	set_thread(0);
6604 
6605 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
6606 	CU_ASSERT(rc == 0);
6607 
6608 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
6609 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
6610 
6611 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
6612 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
6613 
6614 	ch1 = spdk_get_io_channel(nvme_ctrlr);
6615 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
6616 
6617 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
6618 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6619 
6620 	set_thread(1);
6621 
6622 	ch2 = spdk_get_io_channel(nvme_ctrlr);
6623 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
6624 
6625 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
6626 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6627 
6628 	/* Reset starts from thread 1. */
6629 	set_thread(1);
6630 
6631 	nvme_ctrlr->resetting = false;
6632 	curr_trid->last_failed_tsc = spdk_get_ticks();
6633 	ctrlr.is_failed = true;
6634 
6635 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
6636 	CU_ASSERT(rc == 0);
6637 	CU_ASSERT(nvme_ctrlr->resetting == true);
6638 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6639 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6640 
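	/* Step the reset sequence one message at a time: the qpairs are deleted
	 * thread by thread first, the ctrlr is then disconnected and reconnected,
	 * and finally the qpairs are recreated thread by thread.
	 */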
6641 	poll_thread_times(0, 3);
6642 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
6643 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6644 
6645 	poll_thread_times(0, 1);
6646 	poll_thread_times(1, 1);
6647 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
6648 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
6649 	CU_ASSERT(ctrlr.is_failed == true);
6650 
6651 	poll_thread_times(1, 1);
6652 	poll_thread_times(0, 1);
6653 	CU_ASSERT(ctrlr.is_failed == false);
6654 	CU_ASSERT(ctrlr.adminq.is_connected == false);
6655 
6656 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6657 	poll_thread_times(0, 2);
6658 	CU_ASSERT(ctrlr.adminq.is_connected == true);
6659 
6660 	poll_thread_times(0, 1);
6661 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6662 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
6663 
6664 	poll_thread_times(1, 1);
6665 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6666 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6667 	CU_ASSERT(nvme_ctrlr->resetting == true);
6668 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
6669 
6670 	poll_thread_times(0, 2);
6671 	CU_ASSERT(nvme_ctrlr->resetting == true);
6672 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6673 	poll_thread_times(1, 1);
6674 	CU_ASSERT(nvme_ctrlr->resetting == true);
6675 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6676 
	/* At this point, just one poll remains before _bdev_nvme_reset_complete() is executed.
6678 	 *
6679 	 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before fabric
	 * connect command is executed. If the fabric connect command times out,
6681 	 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until
6682 	 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false.
6683 	 *
6684 	 * Simulate fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
6685 	 */
6686 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
6687 	CU_ASSERT(rc == -EINPROGRESS);
6688 	CU_ASSERT(nvme_ctrlr->resetting == true);
6689 	CU_ASSERT(nvme_ctrlr->pending_failover == true);
6690 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6691 
6692 	poll_thread_times(0, 1);
6693 
6694 	CU_ASSERT(nvme_ctrlr->resetting == true);
6695 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6696 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
6697 
6698 	poll_threads();
6699 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6700 	poll_threads();
6701 
6702 	CU_ASSERT(nvme_ctrlr->resetting == false);
6703 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6704 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6705 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6706 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6707 
6708 	spdk_put_io_channel(ch2);
6709 
6710 	set_thread(0);
6711 
6712 	spdk_put_io_channel(ch1);
6713 
6714 	poll_threads();
6715 
6716 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6717 	CU_ASSERT(rc == 0);
6718 
6719 	poll_threads();
6720 	spdk_delay_us(1000);
6721 	poll_threads();
6722 
6723 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6724 }

6725 static void
6726 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
6727 {
6728 	int *_rc = (int *)cb_arg;
6729 
6730 	SPDK_CU_ASSERT_FATAL(_rc != NULL);
6731 	*_rc = rc;
6732 }
6733 
6734 static void
6735 test_ctrlr_op_rpc(void)
6736 {
6737 	struct spdk_nvme_transport_id trid = {};
6738 	struct spdk_nvme_ctrlr ctrlr = {};
6739 	struct nvme_ctrlr *nvme_ctrlr = NULL;
6740 	struct nvme_path_id *curr_trid;
6741 	struct spdk_io_channel *ch1, *ch2;
6742 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
6743 	int ctrlr_op_rc;
6744 	int rc;
6745 
6746 	ut_init_trid(&trid);
6747 	TAILQ_INIT(&ctrlr.active_io_qpairs);
6748 
6749 	set_thread(0);
6750 
6751 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
6752 	CU_ASSERT(rc == 0);
6753 
6754 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
6755 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
6756 
6757 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
6758 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
6759 
6760 	ch1 = spdk_get_io_channel(nvme_ctrlr);
6761 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
6762 
6763 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
6764 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6765 
6766 	set_thread(1);
6767 
6768 	ch2 = spdk_get_io_channel(nvme_ctrlr);
6769 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
6770 
6771 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
6772 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6773 
6774 	/* The ctrlr operations below are issued from thread 1. */
6775 	set_thread(1);
6776 
6777 	/* Case 1: ctrlr is already being destructed. */
6778 	nvme_ctrlr->destruct = true;
6779 	ctrlr_op_rc = 0;
6780 
6781 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
6782 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6783 
6784 	poll_threads();
6785 
6786 	CU_ASSERT(ctrlr_op_rc == -ENXIO);
6787 
6788 	/* Case 2: reset is in progress. */
6789 	nvme_ctrlr->destruct = false;
6790 	nvme_ctrlr->resetting = true;
6791 	ctrlr_op_rc = 0;
6792 
6793 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
6794 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6795 
6796 	poll_threads();
6797 
6798 	CU_ASSERT(ctrlr_op_rc == -EBUSY);
6799 
6800 	/* Case 3: reset completes successfully. */
6801 	nvme_ctrlr->resetting = false;
6802 	curr_trid->last_failed_tsc = spdk_get_ticks();
6803 	ctrlr.is_failed = true;
6804 	ctrlr_op_rc = -1;
6805 
6806 	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
6807 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6808 
6809 	CU_ASSERT(nvme_ctrlr->resetting == true);
6810 	CU_ASSERT(ctrlr_op_rc == -1);
6811 
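	/* Run the reset to completion. The admin qpair reconnects only after
	 * the poll period elapses, hence the delay between the two poll rounds.
	 */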
6812 	poll_threads();
6813 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6814 	poll_threads();
6815 
6816 	CU_ASSERT(nvme_ctrlr->resetting == false);
6817 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6818 	CU_ASSERT(ctrlr.is_failed == false);
6819 	CU_ASSERT(ctrlr_op_rc == 0);
6820 
6821 	/* Case 4: invalid operation. */
6822 	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
6823 			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6824 
6825 	poll_threads();
6826 
6827 	CU_ASSERT(ctrlr_op_rc == -EINVAL);
6828 
6829 	spdk_put_io_channel(ch2);
6830 
6831 	set_thread(0);
6832 
6833 	spdk_put_io_channel(ch1);
6834 
6835 	poll_threads();
6836 
6837 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
6838 	CU_ASSERT(rc == 0);
6839 
6840 	poll_threads();
6841 	spdk_delay_us(1000);
6842 	poll_threads();
6843 
6844 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6845 }
6846 
6847 static void
6848 test_bdev_ctrlr_op_rpc(void)
6849 {
6850 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
6851 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
6852 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6853 	struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL;
6854 	struct nvme_path_id *curr_trid1, *curr_trid2;
6855 	struct spdk_io_channel *ch11, *ch12, *ch21, *ch22;
6856 	struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22;
6857 	int ctrlr_op_rc;
6858 	int rc;
6859 
6860 	ut_init_trid(&trid1);
6861 	ut_init_trid2(&trid2);
6862 	TAILQ_INIT(&ctrlr1.active_io_qpairs);
6863 	TAILQ_INIT(&ctrlr2.active_io_qpairs);
6864 	ctrlr1.cdata.cmic.multi_ctrlr = 1;
6865 	ctrlr2.cdata.cmic.multi_ctrlr = 1;
6866 	ctrlr1.cdata.cntlid = 1;
6867 	ctrlr2.cdata.cntlid = 2;
6868 	ctrlr1.adminq.is_connected = true;
6869 	ctrlr2.adminq.is_connected = true;
6870 
6871 	set_thread(0);
6872 
6873 	rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL);
6874 	CU_ASSERT(rc == 0);
6875 
6876 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6877 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6878 
6879 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1);
6880 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6881 
6882 	curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
6883 	SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL);
6884 
6885 	ch11 = spdk_get_io_channel(nvme_ctrlr1);
6886 	SPDK_CU_ASSERT_FATAL(ch11 != NULL);
6887 
6888 	ctrlr_ch11 = spdk_io_channel_get_ctx(ch11);
6889 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
6890 
6891 	set_thread(1);
6892 
6893 	ch12 = spdk_get_io_channel(nvme_ctrlr1);
6894 	SPDK_CU_ASSERT_FATAL(ch12 != NULL);
6895 
6896 	ctrlr_ch12 = spdk_io_channel_get_ctx(ch12);
6897 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
6898 
6899 	set_thread(0);
6900 
6901 	rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL);
6902 	CU_ASSERT(rc == 0);
6903 
6904 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2);
6905 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6906 
6907 	curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
6908 	SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL);
6909 
6910 	ch21 = spdk_get_io_channel(nvme_ctrlr2);
6911 	SPDK_CU_ASSERT_FATAL(ch21 != NULL);
6912 
6913 	ctrlr_ch21 = spdk_io_channel_get_ctx(ch21);
6914 	CU_ASSERT(ctrlr_ch21->qpair != NULL);
6915 
6916 	set_thread(1);
6917 
6918 	ch22 = spdk_get_io_channel(nvme_ctrlr2);
6919 	SPDK_CU_ASSERT_FATAL(ch22 != NULL);
6920 
6921 	ctrlr_ch22 = spdk_io_channel_get_ctx(ch22);
6922 	CU_ASSERT(ctrlr_ch22->qpair != NULL);
6923 
6924 	/* Reset starts from thread 1. */
6925 	set_thread(1);
6926 
6927 	nvme_ctrlr1->resetting = false;
6928 	nvme_ctrlr2->resetting = false;
6929 	curr_trid1->last_failed_tsc = spdk_get_ticks();
6930 	curr_trid2->last_failed_tsc = spdk_get_ticks();
6931 	ctrlr_op_rc = -1;
6932 
6933 	nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET,
6934 			       ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);
6935 
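	/* The ctrlrs under the nvme_bdev_ctrlr are operated on sequentially:
	 * the reset of ctrlr1 starts first, while ctrlr2 is left untouched.
	 */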
6936 	CU_ASSERT(nvme_ctrlr1->resetting == true);
6937 	CU_ASSERT(ctrlr_ch11->qpair != NULL);
6938 	CU_ASSERT(ctrlr_ch12->qpair != NULL);
6939 	CU_ASSERT(nvme_ctrlr2->resetting == false);
6940 
6941 	poll_thread_times(0, 3);
6942 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
6943 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
6944 
6945 	poll_thread_times(0, 1);
6946 	poll_thread_times(1, 1);
6947 	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
6948 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
6949 
6950 	poll_thread_times(1, 1);
6951 	poll_thread_times(0, 1);
6952 	CU_ASSERT(ctrlr1.adminq.is_connected == false);
6953 
6954 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6955 	poll_thread_times(0, 2);
6956 	CU_ASSERT(ctrlr1.adminq.is_connected == true);
6957 
6958 	poll_thread_times(0, 1);
6959 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
6960 	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);
6961 
6962 	poll_thread_times(1, 1);
6963 	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
6964 	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
6965 	CU_ASSERT(nvme_ctrlr1->resetting == true);
6966 	CU_ASSERT(curr_trid1->last_failed_tsc != 0);
6967 
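	/* Step through the rest of ctrlr1's reset. When it completes, the
	 * reset of ctrlr2 starts without any further call.
	 */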
6968 	poll_thread_times(0, 2);
6969 	poll_thread_times(1, 1);
6970 	poll_thread_times(0, 1);
6971 	poll_thread_times(1, 1);
6972 	poll_thread_times(0, 1);
6973 	poll_thread_times(1, 1);
6974 	poll_thread_times(0, 1);
6975 
6976 	CU_ASSERT(nvme_ctrlr1->resetting == false);
6977 	CU_ASSERT(curr_trid1->last_failed_tsc == 0);
6978 	CU_ASSERT(nvme_ctrlr2->resetting == true);
6979 
6980 	poll_threads();
6981 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6982 	poll_threads();
6983 
6984 	CU_ASSERT(nvme_ctrlr2->resetting == false);
6985 	CU_ASSERT(ctrlr_op_rc == 0);
6986 
6987 	set_thread(1);
6988 
6989 	spdk_put_io_channel(ch12);
6990 	spdk_put_io_channel(ch22);
6991 
6992 	set_thread(0);
6993 
6994 	spdk_put_io_channel(ch11);
6995 	spdk_put_io_channel(ch21);
6996 
6997 	poll_threads();
6998 
6999 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7000 	CU_ASSERT(rc == 0);
7001 
7002 	poll_threads();
7003 	spdk_delay_us(1000);
7004 	poll_threads();
7005 
7006 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
7007 }
7008 
7009 static void
7010 test_disable_enable_ctrlr(void)
7011 {
7012 	struct spdk_nvme_transport_id trid = {};
7013 	struct spdk_nvme_ctrlr ctrlr = {};
7014 	struct nvme_ctrlr *nvme_ctrlr = NULL;
7015 	struct nvme_path_id *curr_trid;
7016 	struct spdk_io_channel *ch1, *ch2;
7017 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
7018 	int rc;
7019 
7020 	ut_init_trid(&trid);
7021 	TAILQ_INIT(&ctrlr.active_io_qpairs);
7022 	ctrlr.adminq.is_connected = true;
7023 
7024 	set_thread(0);
7025 
7026 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
7027 	CU_ASSERT(rc == 0);
7028 
7029 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
7030 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
7031 
7032 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
7033 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
7034 
7035 	ch1 = spdk_get_io_channel(nvme_ctrlr);
7036 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
7037 
7038 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
7039 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7040 
7041 	set_thread(1);
7042 
7043 	ch2 = spdk_get_io_channel(nvme_ctrlr);
7044 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
7045 
7046 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
7047 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7048 
7049 	/* Disable starts from thread 1. */
7050 	set_thread(1);
7051 
7052 	/* Case 1: ctrlr is already disabled. */
7053 	nvme_ctrlr->disabled = true;
7054 
7055 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7056 	CU_ASSERT(rc == -EALREADY);
7057 
7058 	/* Case 2: ctrlr is already being destructed. */
7059 	nvme_ctrlr->disabled = false;
7060 	nvme_ctrlr->destruct = true;
7061 
7062 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7063 	CU_ASSERT(rc == -ENXIO);
7064 
7065 	/* Case 3: reset is in progress. */
7066 	nvme_ctrlr->destruct = false;
7067 	nvme_ctrlr->resetting = true;
7068 
7069 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7070 	CU_ASSERT(rc == -EBUSY);
7071 
7072 	/* Case 4: disable completes successfully. */
7073 	nvme_ctrlr->resetting = false;
7074 
7075 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7076 	CU_ASSERT(rc == 0);
7077 	CU_ASSERT(nvme_ctrlr->resetting == true);
7078 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
7079 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
7080 
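	/* Disabling follows the same sequence as reset: the I/O qpairs are
	 * disconnected thread by thread, then the admin qpair is disconnected.
	 */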
7081 	poll_thread_times(0, 3);
7082 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7083 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7084 
7085 	poll_thread_times(0, 1);
7086 	poll_thread_times(1, 1);
7087 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7088 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7089 
7090 	poll_thread_times(1, 1);
7091 	poll_thread_times(0, 1);
7092 	CU_ASSERT(ctrlr.adminq.is_connected == false);
7093 	poll_thread_times(1, 1);
7094 	poll_thread_times(0, 1);
7095 	poll_thread_times(1, 1);
7096 	poll_thread_times(0, 1);
7097 	CU_ASSERT(nvme_ctrlr->resetting == false);
7098 	CU_ASSERT(nvme_ctrlr->disabled == true);
7099 
7100 	/* Case 5: enable completes successfully. */
7101 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7102 	CU_ASSERT(rc == 0);
7103 
7104 	CU_ASSERT(nvme_ctrlr->resetting == true);
7105 	CU_ASSERT(nvme_ctrlr->disabled == false);
7106 
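	/* Enabling reconnects the admin qpair first, then the I/O qpairs. */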
7107 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7108 	poll_thread_times(0, 2);
7109 	CU_ASSERT(ctrlr.adminq.is_connected == true);
7110 
7111 	poll_thread_times(0, 1);
7112 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7113 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7114 
7115 	poll_thread_times(1, 1);
7116 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
7117 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
7118 	CU_ASSERT(nvme_ctrlr->resetting == true);
7119 
7120 	poll_thread_times(0, 2);
7121 	CU_ASSERT(nvme_ctrlr->resetting == true);
7122 	poll_thread_times(1, 1);
7123 	CU_ASSERT(nvme_ctrlr->resetting == true);
7124 	poll_thread_times(0, 1);
7125 	CU_ASSERT(nvme_ctrlr->resetting == false);
7126 
7127 	/* Case 6: ctrlr is already enabled. */
7128 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7129 	CU_ASSERT(rc == -EALREADY);
7130 
7131 	set_thread(0);
7132 
7133 	/* Case 7: disable cancels delayed reconnect. */
7134 	nvme_ctrlr->opts.reconnect_delay_sec = 10;
7135 	ctrlr.fail_reset = true;
7136 
7137 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
7138 	CU_ASSERT(rc == 0);
7139 
7140 	poll_threads();
7141 
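	/* The reset fails, so a delayed reconnect is scheduled instead of an
	 * immediate retry.
	 */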
7142 	CU_ASSERT(nvme_ctrlr->resetting == false);
7143 	CU_ASSERT(ctrlr.is_failed == false);
7144 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
7145 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
7146 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
7147 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
7148 
7149 	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
7150 	CU_ASSERT(rc == 0);
7151 
7152 	CU_ASSERT(nvme_ctrlr->resetting == true);
7153 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
7154 
7155 	poll_threads();
7156 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
7157 	poll_threads();
7158 
7159 	CU_ASSERT(nvme_ctrlr->resetting == false);
7160 	CU_ASSERT(nvme_ctrlr->disabled == true);
7161 
7162 	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
7163 	CU_ASSERT(rc == 0);
7164 
7165 	CU_ASSERT(nvme_ctrlr->resetting == true);
7166 	CU_ASSERT(nvme_ctrlr->disabled == false);
7167 
7168 	poll_threads();
7169 
7170 	CU_ASSERT(nvme_ctrlr->resetting == false);
7171 
7172 	set_thread(1);
7173 
7174 	spdk_put_io_channel(ch2);
7175 
7176 	set_thread(0);
7177 
7178 	spdk_put_io_channel(ch1);
7179 
7180 	poll_threads();
7181 
7182 	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
7183 	CU_ASSERT(rc == 0);
7184 
7185 	poll_threads();
7186 	spdk_delay_us(1000);
7187 	poll_threads();
7188 
7189 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7190 }
7191 
7192 static void
7193 ut_delete_done(void *ctx, int rc)
7194 {
7195 	int *delete_done_rc = ctx;
7196 	*delete_done_rc = rc;
7197 }
7198 
7199 static void
7200 test_delete_ctrlr_done(void)
7201 {
7202 	struct spdk_nvme_transport_id trid = {};
7203 	struct spdk_nvme_ctrlr ctrlr = {};
7204 	int delete_done_rc = 0xDEADBEEF;
7205 	int rc;
7206 
7207 	ut_init_trid(&trid);
7208 
7209 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);
7210 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
7211 
7212 	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
7213 	CU_ASSERT(rc == 0);
7214 
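	/* Poll until the deletion completes; the iteration cap turns a missed
	 * completion into a test failure instead of an infinite loop.
	 */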
7215 	for (int i = 0; i < 20; i++) {
7216 		poll_threads();
7217 		if (delete_done_rc == 0) {
7218 			break;
7219 		}
7220 		spdk_delay_us(1000);
7221 	}
7222 
7223 	CU_ASSERT(delete_done_rc == 0);
7224 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
7225 }
7226 
7227 int
7228 main(int argc, char **argv)
7229 {
7230 	CU_pSuite	suite = NULL;
7231 	unsigned int	num_failures;
7232 
7233 	CU_initialize_registry();
7234 
7235 	suite = CU_add_suite("nvme", NULL, NULL);
7236 
7237 	CU_ADD_TEST(suite, test_create_ctrlr);
7238 	CU_ADD_TEST(suite, test_reset_ctrlr);
7239 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
7240 	CU_ADD_TEST(suite, test_failover_ctrlr);
7241 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
7242 	CU_ADD_TEST(suite, test_pending_reset);
7243 	CU_ADD_TEST(suite, test_attach_ctrlr);
7244 	CU_ADD_TEST(suite, test_aer_cb);
7245 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
7246 	CU_ADD_TEST(suite, test_add_remove_trid);
7247 	CU_ADD_TEST(suite, test_abort);
7248 	CU_ADD_TEST(suite, test_get_io_qpair);
7249 	CU_ADD_TEST(suite, test_bdev_unregister);
7250 	CU_ADD_TEST(suite, test_compare_ns);
7251 	CU_ADD_TEST(suite, test_init_ana_log_page);
7252 	CU_ADD_TEST(suite, test_get_memory_domains);
7253 	CU_ADD_TEST(suite, test_reconnect_qpair);
7254 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
7255 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
7256 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
7257 	CU_ADD_TEST(suite, test_admin_path);
7258 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
7259 	CU_ADD_TEST(suite, test_find_io_path);
7260 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
7261 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
7262 	CU_ADD_TEST(suite, test_retry_io_count);
7263 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
7264 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
7265 	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
7266 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
7267 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
7268 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
7269 	CU_ADD_TEST(suite, test_fail_path);
7270 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
7271 	CU_ADD_TEST(suite, test_ana_transition);
7272 	CU_ADD_TEST(suite, test_set_preferred_path);
7273 	CU_ADD_TEST(suite, test_find_next_io_path);
7274 	CU_ADD_TEST(suite, test_find_io_path_min_qd);
7275 	CU_ADD_TEST(suite, test_disable_auto_failback);
7276 	CU_ADD_TEST(suite, test_set_multipath_policy);
7277 	CU_ADD_TEST(suite, test_uuid_generation);
7278 	CU_ADD_TEST(suite, test_retry_io_to_same_path);
7279 	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
7280 	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
7281 	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
7282 	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
7283 	CU_ADD_TEST(suite, test_delete_ctrlr_done);
7284 
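	/* Allocate the ut threads used by the multithread tests and bring up
	 * the module before running the suite.
	 */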
7285 	allocate_threads(3);
7286 	set_thread(0);
7287 	bdev_nvme_library_init();
7288 	init_accel();
7289 
7290 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
7291 
7292 	set_thread(0);
7293 	bdev_nvme_library_fini();
7294 	fini_accel();
7295 	free_threads();
7296 
7297 	CU_cleanup_registry();
7298 
7299 	return num_failures;
7300 }
7301