/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

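/* Fake I/O device representing the accel framework. The spdk_accel_get_io_channel()
 * stub below hands out channels for this pointer; it is presumably registered as an
 * io_device in the suite's setup code outside this excerpt.
 */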
static void *g_accel_p = (void *)0xdeadbeaf;

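/* Most of the NVMe driver API is mocked out with the DEFINE_STUB() family of macros
 * from spdk_internal/mock.h: DEFINE_STUB(fn, ret, (args), val) emits an fn() that
 * returns val by default, and tests can override the return value at run time with
 * MOCK_SET(fn, other_val).
 */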
DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));

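/* Hand-rolled mock: fills 'domains' with a dummy pointer for up to 'array_size'
 * entries. The reported count is driven by ut_spdk_nvme_ctrlr_get_memory_domains,
 * which is declared by DEFINE_RETURN_MOCK() above and set by tests via MOCK_SET().
 */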
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);

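/* Minimal test doubles for structures that are opaque in the real NVMe driver.
 * Tests manipulate these fields directly to simulate controller, namespace, and
 * qpair state transitions without any transport or hardware.
 */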
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint8_t				failure_reason;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	bool				is_removed;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

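/* Create a fake controller with 'num_ns' active namespaces and queue it on
 * g_ut_init_ctrlrs. It moves to g_ut_attached_ctrlrs when a probe context with a
 * matching trid is polled (see spdk_nvme_probe_poll_async() below). Returns NULL
 * if a controller with the same trid already exists.
 */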
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

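/* Queue a fake request on the given qpair. The completion callback does not run
 * here; it fires when the test polls the qpair via
 * spdk_nvme_qpair_process_completions().
 */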
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

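/* Simulated asynchronous attach: spdk_nvme_connect_async() only records the target
 * trid, and spdk_nvme_probe_poll_async() later moves any matching controller from
 * g_ut_init_ctrlrs to g_ut_attached_ctrlrs and invokes the attach callback.
 */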
static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

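/* Build an ANA log page into 'buf': a spdk_nvme_ana_page header followed by one
 * group descriptor per active namespace. Each descriptor carries exactly one NSID
 * and its ANA group ID equals that NSID, which is why UT_ANA_DESC_SIZE is the
 * descriptor header plus a single uint32_t.
 */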
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

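/* Mocked abort: find the outstanding request matching 'cmd_cb_arg', rewrite its
 * completion status to ABORTED BY REQUEST, and queue the ABORT command itself on
 * the admin qpair. Both completions are delivered on the next poll.
 */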
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

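/* Complete every outstanding request on the qpair in submission order. Unlike the
 * real driver, 'max_completions' is ignored and everything completes in one call.
 */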
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

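/* Poll-group mock: disconnected qpairs are reported through disconnected_qpair_cb,
 * a connected qpair with a failure reason is disconnected (and counted as one
 * completion so the poller appears busy), and the rest are polled for completions.
 */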
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

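/* Verify basic creation and deletion of a nvme_ctrlr. Deletion is asynchronous:
 * the ctrlr must stay findable by name until the threads are polled.
 */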
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

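/* Walk bdev_nvme_reset_ctrlr() through its failure modes (-ENXIO while the ctrlr
 * is being destructed, -EBUSY while another reset is in progress) and then step a
 * successful reset with poll_thread_times(): I/O qpairs are disconnected on each
 * thread, the admin qpair is reconnected, I/O qpairs are recreated on each thread,
 * and finally last_failed_tsc is cleared and the resetting flag is dropped.
 * Case 4 checks that resetting a removed ctrlr reports the removal through the
 * ctrlr_op callback.
 */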
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try to destruct the ctrlr while it is being reset, and check that destruction is deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed, but the ctrlr is still not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

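/* Exercise bdev_nvme_failover_ctrlr(), first with a single trid (failover then
 * amounts to a reset of the current path) and then with two trids, where a
 * successful failover rotates the trid list so that trid2 becomes the active path.
 */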
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1
 * was disconnected and resetting the ctrlr failed repeatedly before starting failover
 * from trid1 to trid2. While processing the failed reset, trid3 was added. trid1
 * should have remained active, i.e., the head of the list, until the failover
 * completed. However, trid3 was inserted at the head of the list by mistake.
 *
 * I/O qpairs have a smaller polling period than the admin qpair. When a connection
 * failure occurs, an I/O qpair may detect the error earlier than the admin qpair.
 * An I/O qpair error invokes reset ctrlr and an admin qpair error invokes failover
 * ctrlr. Hence reset ctrlr may be executed repeatedly before failover is executed,
 * so this bug is real.
 *
 * The following test verifies the fix.
 */
static void
test_race_between_failover_and_add_secondary_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *path_id1, *path_id2, *path_id3;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	path_id2 = TAILQ_NEXT(path_id1, link);
	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);

	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(path_id1->last_failed_tsc != 0);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
	path_id3 = TAILQ_NEXT(path_id2, link);
	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}

1847 static void
1848 test_pending_reset(void)
1849 {
1850 	struct spdk_nvme_transport_id trid = {};
1851 	struct spdk_nvme_ctrlr *ctrlr;
1852 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1853 	const int STRING_SIZE = 32;
1854 	const char *attached_names[STRING_SIZE];
1855 	struct nvme_bdev *bdev;
1856 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1857 	struct spdk_io_channel *ch1, *ch2;
1858 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1859 	struct nvme_io_path *io_path1, *io_path2;
1860 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1861 	int rc;
1862 
1863 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1864 	ut_init_trid(&trid);
1865 
1866 	set_thread(0);
1867 
1868 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1869 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1870 
1871 	g_ut_attach_ctrlr_status = 0;
1872 	g_ut_attach_bdev_count = 1;
1873 
1874 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1875 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1876 	CU_ASSERT(rc == 0);
1877 
1878 	spdk_delay_us(1000);
1879 	poll_threads();
1880 
1881 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1882 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1883 
1884 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1885 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1886 
1887 	ch1 = spdk_get_io_channel(bdev);
1888 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1889 
1890 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1891 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1892 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1893 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1894 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1895 
1896 	set_thread(1);
1897 
1898 	ch2 = spdk_get_io_channel(bdev);
1899 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1900 
1901 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1902 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1903 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1904 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1905 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1906 
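	/* Pre-set both requests to FAILED so that they are only observed to
	 * succeed if the reset path explicitly completes them.
	 */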
1907 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1908 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1909 
1910 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1911 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1912 
1913 	/* The first reset request is submitted on thread 1, and the second reset request
1914 	 * is submitted on thread 0 while processing the first request.
1915 	 */
1916 	bdev_nvme_submit_request(ch2, first_bdev_io);
1917 	CU_ASSERT(nvme_ctrlr->resetting == true);
1918 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1919 
1920 	set_thread(0);
1921 
1922 	bdev_nvme_submit_request(ch1, second_bdev_io);
1923 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1924 
1925 	poll_threads();
1926 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1927 	poll_threads();
1928 
1929 	CU_ASSERT(nvme_ctrlr->resetting == false);
1930 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1931 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1932 
1933 	/* The first reset request is submitted on thread 1, and the second reset request
1934 	 * is submitted on thread 0 while processing the first request.
1935 	 *
1936 	 * The difference from the above scenario is that resetting the controller fails
1937 	 * while processing the first request. Hence both reset requests should fail.
1938 	 */
1939 	set_thread(1);
1940 
1941 	bdev_nvme_submit_request(ch2, first_bdev_io);
1942 	CU_ASSERT(nvme_ctrlr->resetting == true);
1943 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1944 
1945 	set_thread(0);
1946 
1947 	bdev_nvme_submit_request(ch1, second_bdev_io);
1948 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1949 
1950 	ctrlr->fail_reset = true;
1951 
1952 	poll_threads();
1953 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1954 	poll_threads();
1955 
1956 	CU_ASSERT(nvme_ctrlr->resetting == false);
1957 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1958 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1959 
1960 	spdk_put_io_channel(ch1);
1961 
1962 	set_thread(1);
1963 
1964 	spdk_put_io_channel(ch2);
1965 
1966 	poll_threads();
1967 
1968 	set_thread(0);
1969 
1970 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1971 	CU_ASSERT(rc == 0);
1972 
1973 	poll_threads();
1974 	spdk_delay_us(1000);
1975 	poll_threads();
1976 
1977 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1978 
1979 	free(first_bdev_io);
1980 	free(second_bdev_io);
1981 }
1982 
1983 static void
1984 test_attach_ctrlr(void)
1985 {
1986 	struct spdk_nvme_transport_id trid = {};
1987 	struct spdk_nvme_ctrlr *ctrlr;
1988 	struct nvme_ctrlr *nvme_ctrlr;
1989 	const int STRING_SIZE = 32;
1990 	const char *attached_names[STRING_SIZE];
1991 	struct nvme_bdev *nbdev;
1992 	int rc;
1993 
1994 	set_thread(0);
1995 
1996 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1997 	ut_init_trid(&trid);
1998 
1999 	/* If the ctrlr is in a failed state, no nvme_ctrlr is created. The failed
2000 	 * ctrlr is removed during probe polling.
2001 	 */
2002 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2003 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2004 
2005 	ctrlr->is_failed = true;
2006 	g_ut_attach_ctrlr_status = -EIO;
2007 	g_ut_attach_bdev_count = 0;
2008 
2009 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2010 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2011 	CU_ASSERT(rc == 0);
2012 
2013 	spdk_delay_us(1000);
2014 	poll_threads();
2015 
2016 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2017 
2018 	/* If the ctrlr has no namespace, an nvme_ctrlr with no namespace is created. */
2019 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
2020 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2021 
2022 	g_ut_attach_ctrlr_status = 0;
2023 
2024 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2025 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2026 	CU_ASSERT(rc == 0);
2027 
2028 	spdk_delay_us(1000);
2029 	poll_threads();
2030 
2031 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2032 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2033 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2034 
2035 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2036 	CU_ASSERT(rc == 0);
2037 
2038 	poll_threads();
2039 	spdk_delay_us(1000);
2040 	poll_threads();
2041 
2042 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2043 
2044 	/* If the ctrlr has one namespace, an nvme_ctrlr with one namespace and
2045 	 * one nvme_bdev are created.
2046 	 */
2047 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2048 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2049 
2050 	g_ut_attach_bdev_count = 1;
2051 
2052 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2053 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2054 	CU_ASSERT(rc == 0);
2055 
2056 	spdk_delay_us(1000);
2057 	poll_threads();
2058 
2059 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2060 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2061 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2062 
2063 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2064 	attached_names[0] = NULL;
2065 
2066 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2067 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2068 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2069 
2070 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2071 	CU_ASSERT(rc == 0);
2072 
2073 	poll_threads();
2074 	spdk_delay_us(1000);
2075 	poll_threads();
2076 
2077 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2078 
2079 	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespace is
2080 	 * created because creating the nvme_bdev failed.
2081 	 */
2082 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2083 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2084 
2085 	g_ut_register_bdev_status = -EINVAL;
2086 	g_ut_attach_bdev_count = 0;
2087 
2088 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2089 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2090 	CU_ASSERT(rc == 0);
2091 
2092 	spdk_delay_us(1000);
2093 	poll_threads();
2094 
2095 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2096 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2097 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2098 
2099 	CU_ASSERT(attached_names[0] == NULL);
2100 
2101 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2102 	CU_ASSERT(rc == 0);
2103 
2104 	poll_threads();
2105 	spdk_delay_us(1000);
2106 	poll_threads();
2107 
2108 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2109 
2110 	g_ut_register_bdev_status = 0;
2111 }
2112 
2113 static void
2114 test_aer_cb(void)
2115 {
2116 	struct spdk_nvme_transport_id trid = {};
2117 	struct spdk_nvme_ctrlr *ctrlr;
2118 	struct nvme_ctrlr *nvme_ctrlr;
2119 	struct nvme_bdev *bdev;
2120 	const int STRING_SIZE = 32;
2121 	const char *attached_names[STRING_SIZE];
2122 	union spdk_nvme_async_event_completion event = {};
2123 	struct spdk_nvme_cpl cpl = {};
2124 	int rc;
2125 
2126 	set_thread(0);
2127 
2128 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2129 	ut_init_trid(&trid);
2130 
2131 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd,
2132 	 * and 4th namespaces are populated.
2133 	 */
2134 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2135 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2136 
2137 	ctrlr->ns[0].is_active = false;
2138 
2139 	g_ut_attach_ctrlr_status = 0;
2140 	g_ut_attach_bdev_count = 3;
2141 
2142 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2143 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2144 	CU_ASSERT(rc == 0);
2145 
2146 	spdk_delay_us(1000);
2147 	poll_threads();
2148 
2149 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2150 	poll_threads();
2151 
2152 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2153 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2154 
2155 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2156 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2157 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2158 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2159 
2160 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2161 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2162 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2163 
2164 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace, and
2165 	 * change the size of the 4th namespace.
2166 	 */
2167 	ctrlr->ns[0].is_active = true;
2168 	ctrlr->ns[2].is_active = false;
2169 	ctrlr->nsdata[3].nsze = 2048;
2170 
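	/* Build an AER completion carrying a Namespace Attribute Changed notice. */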
2171 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2172 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2173 	cpl.cdw0 = event.raw;
2174 
2175 	aer_cb(nvme_ctrlr, &cpl);
2176 
2177 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2178 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2179 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2180 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2181 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2182 
2183 	/* Change ANA state of active namespaces. */
2184 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2185 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2186 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2187 
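	/* Build an AER completion carrying an ANA Change notice. */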
2188 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2189 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2190 	cpl.cdw0 = event.raw;
2191 
2192 	aer_cb(nvme_ctrlr, &cpl);
2193 
2194 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2195 	poll_threads();
2196 
2197 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2198 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2199 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2200 
2201 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2202 	CU_ASSERT(rc == 0);
2203 
2204 	poll_threads();
2205 	spdk_delay_us(1000);
2206 	poll_threads();
2207 
2208 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2209 }
2210 
2211 static void
2212 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2213 			enum spdk_bdev_io_type io_type)
2214 {
2215 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2216 	struct nvme_io_path *io_path;
2217 	struct spdk_nvme_qpair *qpair;
2218 
2219 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2220 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2221 	qpair = io_path->qpair->qpair;
2222 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2223 
2224 	bdev_io->type = io_type;
2225 	bdev_io->internal.in_submit_request = true;
2226 
2227 	bdev_nvme_submit_request(ch, bdev_io);
2228 
2229 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2230 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2231 
2232 	poll_threads();
2233 
2234 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2235 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2236 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2237 }
2238 
2239 static void
2240 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2241 		   enum spdk_bdev_io_type io_type)
2242 {
2243 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2244 	struct nvme_io_path *io_path;
2245 	struct spdk_nvme_qpair *qpair;
2246 
2247 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2248 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2249 	qpair = io_path->qpair->qpair;
2250 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2251 
2252 	bdev_io->type = io_type;
2253 	bdev_io->internal.in_submit_request = true;
2254 
2255 	bdev_nvme_submit_request(ch, bdev_io);
2256 
2257 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2258 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2259 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2260 }
2261 
2262 static void
2263 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2264 {
2265 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2266 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2267 	struct ut_nvme_req *req;
2268 	struct nvme_io_path *io_path;
2269 	struct spdk_nvme_qpair *qpair;
2270 
2271 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2272 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2273 	qpair = io_path->qpair->qpair;
2274 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2275 
2276 	/* Compare-and-write is the only fused command supported for now. */
2277 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2278 	bdev_io->internal.in_submit_request = true;
2279 
2280 	bdev_nvme_submit_request(ch, bdev_io);
2281 
2282 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2283 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2284 	CU_ASSERT(bio->first_fused_submitted == true);
2285 
2286 	/* First outstanding request is compare operation. */
2287 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2288 	SPDK_CU_ASSERT_FATAL(req != NULL);
2289 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2290 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2291 
2292 	poll_threads();
2293 
2294 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2295 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2296 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2297 }
2298 
2299 static void
2300 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2301 			 struct spdk_nvme_ctrlr *ctrlr)
2302 {
2303 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2304 	bdev_io->internal.in_submit_request = true;
2305 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2306 
2307 	bdev_nvme_submit_request(ch, bdev_io);
2308 
2309 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2310 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2311 
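	/* The admin command completes on thread 1, where the ctrlr was created, and
	 * the completion is then passed back to thread 0, which submitted the request.
	 */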
2312 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2313 	poll_thread_times(1, 1);
2314 
2315 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2316 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2317 
2318 	poll_thread_times(0, 1);
2319 
2320 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2321 }
2322 
2323 static void
2324 test_submit_nvme_cmd(void)
2325 {
2326 	struct spdk_nvme_transport_id trid = {};
2327 	struct spdk_nvme_ctrlr *ctrlr;
2328 	struct nvme_ctrlr *nvme_ctrlr;
2329 	const int STRING_SIZE = 32;
2330 	const char *attached_names[STRING_SIZE];
2331 	struct nvme_bdev *bdev;
2332 	struct spdk_bdev_io *bdev_io;
2333 	struct spdk_io_channel *ch;
2334 	int rc;
2335 
2336 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2337 	ut_init_trid(&trid);
2338 
2339 	set_thread(1);
2340 
2341 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2342 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2343 
2344 	g_ut_attach_ctrlr_status = 0;
2345 	g_ut_attach_bdev_count = 1;
2346 
2347 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2348 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2349 	CU_ASSERT(rc == 0);
2350 
2351 	spdk_delay_us(1000);
2352 	poll_threads();
2353 
2354 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2355 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2356 
2357 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2358 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2359 
2360 	set_thread(0);
2361 
2362 	ch = spdk_get_io_channel(bdev);
2363 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2364 
2365 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2366 
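	/* Submit the first read with no iovec set to cover the path that must
	 * handle a missing data buffer.
	 */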
2367 	bdev_io->u.bdev.iovs = NULL;
2368 
2369 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2370 
2371 	ut_bdev_io_set_buf(bdev_io);
2372 
2373 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2374 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2375 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2376 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2377 
2378 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2379 
2380 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2381 
2382 	/* Verify that the ext NVMe API is called when data is described by a memory domain. */
2383 	g_ut_readv_ext_called = false;
2384 	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
2385 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2386 	CU_ASSERT(g_ut_readv_ext_called == true);
2387 	g_ut_readv_ext_called = false;
2388 	bdev_io->u.bdev.memory_domain = NULL;
2389 
2390 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2391 
2392 	free(bdev_io);
2393 
2394 	spdk_put_io_channel(ch);
2395 
2396 	poll_threads();
2397 
2398 	set_thread(1);
2399 
2400 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2401 	CU_ASSERT(rc == 0);
2402 
2403 	poll_threads();
2404 	spdk_delay_us(1000);
2405 	poll_threads();
2406 
2407 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2408 }
2409 
2410 static void
2411 test_add_remove_trid(void)
2412 {
2413 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2414 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2415 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2416 	const int STRING_SIZE = 32;
2417 	const char *attached_names[STRING_SIZE];
2418 	struct nvme_path_id *ctrid;
2419 	int rc;
2420 
2421 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2422 	ut_init_trid(&path1.trid);
2423 	ut_init_trid2(&path2.trid);
2424 	ut_init_trid3(&path3.trid);
2425 
2426 	set_thread(0);
2427 
2428 	g_ut_attach_ctrlr_status = 0;
2429 	g_ut_attach_bdev_count = 0;
2430 
2431 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2432 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2433 
2434 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2435 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2436 	CU_ASSERT(rc == 0);
2437 
2438 	spdk_delay_us(1000);
2439 	poll_threads();
2440 
2441 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2442 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2443 
2444 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2445 
2446 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2447 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2448 
2449 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2450 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2451 	CU_ASSERT(rc == 0);
2452 
2453 	spdk_delay_us(1000);
2454 	poll_threads();
2455 
2456 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2457 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2458 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2459 			break;
2460 		}
2461 	}
2462 	CU_ASSERT(ctrid != NULL);
2463 
2464 	/* trid3 is not in the registered list. */
2465 	rc = bdev_nvme_delete("nvme0", &path3);
2466 	CU_ASSERT(rc == -ENXIO);
2467 
2468 	/* trid2 is not in use, and is simply removed. */
2469 	rc = bdev_nvme_delete("nvme0", &path2);
2470 	CU_ASSERT(rc == 0);
2471 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2472 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2473 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2474 	}
2475 
2476 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2477 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2478 
2479 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2480 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2481 	CU_ASSERT(rc == 0);
2482 
2483 	spdk_delay_us(1000);
2484 	poll_threads();
2485 
2486 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2487 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2488 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2489 			break;
2490 		}
2491 	}
2492 	CU_ASSERT(ctrid != NULL);
2493 
2494 	/* Forcefully mark path3 as failed by setting its last_failed_tsc to a non-zero value.
2495 	 * If we add path2 again, it should be inserted between path1 and path3.
2496 	 * Then we remove path2. It is not in use, and is simply removed.
2497 	 */
2498 	ctrid->last_failed_tsc = spdk_get_ticks() + 1;
2499 
2500 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2501 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2502 
2503 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2504 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2505 	CU_ASSERT(rc == 0);
2506 
2507 	spdk_delay_us(1000);
2508 	poll_threads();
2509 
2510 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2511 
2512 	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
2513 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2514 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);
2515 
2516 	ctrid = TAILQ_NEXT(ctrid, link);
2517 	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2518 	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);
2519 
2520 	rc = bdev_nvme_delete("nvme0", &path2);
2521 	CU_ASSERT(rc == 0);
2522 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2523 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2524 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2525 	}
2526 
2527 	/* path1 is currently in use and path3 is an alternative path.
2528 	 * If we remove path1, the active path changes to path3.
2529 	 */
2530 	rc = bdev_nvme_delete("nvme0", &path1);
2531 	CU_ASSERT(rc == 0);
2532 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2533 	CU_ASSERT(nvme_ctrlr->resetting == true);
2534 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2535 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2536 	}
2537 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2538 
2539 	poll_threads();
2540 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2541 	poll_threads();
2542 
2543 	CU_ASSERT(nvme_ctrlr->resetting == false);
2544 
2545 	/* path3 is the current and only path. If we remove path3, the corresponding
2546 	 * nvme_ctrlr is removed.
2547 	 */
2548 	rc = bdev_nvme_delete("nvme0", &path3);
2549 	CU_ASSERT(rc == 0);
2550 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2551 
2552 	poll_threads();
2553 	spdk_delay_us(1000);
2554 	poll_threads();
2555 
2556 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2557 
2558 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2559 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2560 
2561 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2562 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2563 	CU_ASSERT(rc == 0);
2564 
2565 	spdk_delay_us(1000);
2566 	poll_threads();
2567 
2568 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2569 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2570 
2571 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2572 
2573 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2574 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2575 
2576 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2577 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2578 	CU_ASSERT(rc == 0);
2579 
2580 	spdk_delay_us(1000);
2581 	poll_threads();
2582 
2583 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2584 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2585 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2586 			break;
2587 		}
2588 	}
2589 	CU_ASSERT(ctrid != NULL);
2590 
2591 	/* If trid is not specified, nvme_ctrlr itself is removed. */
2592 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2593 	CU_ASSERT(rc == 0);
2594 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2595 
2596 	poll_threads();
2597 	spdk_delay_us(1000);
2598 	poll_threads();
2599 
2600 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2601 }
2602 
2603 static void
2604 test_abort(void)
2605 {
2606 	struct spdk_nvme_transport_id trid = {};
2607 	struct nvme_ctrlr_opts opts = {};
2608 	struct spdk_nvme_ctrlr *ctrlr;
2609 	struct nvme_ctrlr *nvme_ctrlr;
2610 	const int STRING_SIZE = 32;
2611 	const char *attached_names[STRING_SIZE];
2612 	struct nvme_bdev *bdev;
2613 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2614 	struct spdk_io_channel *ch1, *ch2;
2615 	struct nvme_bdev_channel *nbdev_ch1;
2616 	struct nvme_io_path *io_path1;
2617 	struct nvme_qpair *nvme_qpair1;
2618 	int rc;
2619 
2620 	/* Create the ctrlr on thread 1 and submit the I/O and admin requests to be aborted
2621 	 * on thread 0. Abort requests for I/O are submitted on thread 0, and abort requests
2622 	 * for admin commands are submitted on thread 1. Both should succeed.
2623 	 */
2624 
2625 	ut_init_trid(&trid);
2626 
2627 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2628 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2629 
2630 	g_ut_attach_ctrlr_status = 0;
2631 	g_ut_attach_bdev_count = 1;
2632 
2633 	set_thread(1);
2634 
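	/* ctrlr_loss_timeout_sec of -1 means the ctrlr is never deleted on
	 * connection loss, and reconnect is retried every reconnect_delay_sec.
	 */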
2635 	opts.ctrlr_loss_timeout_sec = -1;
2636 	opts.reconnect_delay_sec = 1;
2637 
2638 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2639 			      attach_ctrlr_done, NULL, NULL, &opts, false);
2640 	CU_ASSERT(rc == 0);
2641 
2642 	spdk_delay_us(1000);
2643 	poll_threads();
2644 
2645 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2646 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2647 
2648 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2649 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2650 
2651 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2652 	ut_bdev_io_set_buf(write_io);
2653 
2654 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2655 	ut_bdev_io_set_buf(fuse_io);
2656 
2657 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2658 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2659 
2660 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2661 
2662 	set_thread(0);
2663 
2664 	ch1 = spdk_get_io_channel(bdev);
2665 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2666 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2667 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2668 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2669 	nvme_qpair1 = io_path1->qpair;
2670 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2671 
2672 	set_thread(1);
2673 
2674 	ch2 = spdk_get_io_channel(bdev);
2675 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2676 
2677 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2678 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2679 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2680 
2681 	/* Aborting the already completed request should fail. */
2682 	write_io->internal.in_submit_request = true;
2683 	bdev_nvme_submit_request(ch1, write_io);
2684 	poll_threads();
2685 
2686 	CU_ASSERT(write_io->internal.in_submit_request == false);
2687 
2688 	abort_io->u.abort.bio_to_abort = write_io;
2689 	abort_io->internal.in_submit_request = true;
2690 
2691 	bdev_nvme_submit_request(ch1, abort_io);
2692 
2693 	poll_threads();
2694 
2695 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2696 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2697 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2698 
2699 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2700 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2701 
2702 	admin_io->internal.in_submit_request = true;
2703 	bdev_nvme_submit_request(ch1, admin_io);
2704 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2705 	poll_threads();
2706 
2707 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2708 
2709 	abort_io->u.abort.bio_to_abort = admin_io;
2710 	abort_io->internal.in_submit_request = true;
2711 
2712 	bdev_nvme_submit_request(ch2, abort_io);
2713 
2714 	poll_threads();
2715 
2716 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2717 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2718 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2719 
2720 	/* Aborting the write request should succeed. */
2721 	write_io->internal.in_submit_request = true;
2722 	bdev_nvme_submit_request(ch1, write_io);
2723 
2724 	CU_ASSERT(write_io->internal.in_submit_request == true);
2725 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2726 
2727 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2728 	abort_io->u.abort.bio_to_abort = write_io;
2729 	abort_io->internal.in_submit_request = true;
2730 
2731 	bdev_nvme_submit_request(ch1, abort_io);
2732 
2733 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2734 	poll_threads();
2735 
2736 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2737 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2738 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2739 	CU_ASSERT(write_io->internal.in_submit_request == false);
2740 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2741 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2742 
2743 	/* Aborting the fuse request should succeed. */
2744 	fuse_io->internal.in_submit_request = true;
2745 	bdev_nvme_submit_request(ch1, fuse_io);
2746 
2747 	CU_ASSERT(fuse_io->internal.in_submit_request == true);
2748 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2749 
2750 	abort_io->u.abort.bio_to_abort = fuse_io;
2751 	abort_io->internal.in_submit_request = true;
2752 
2753 	bdev_nvme_submit_request(ch1, abort_io);
2754 
2755 	spdk_delay_us(10000);
2756 	poll_threads();
2757 
2758 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2759 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2760 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2761 	CU_ASSERT(fuse_io->internal.in_submit_request == false);
2762 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2763 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2764 
2765 	/* Aborting the admin request should succeed. */
2766 	admin_io->internal.in_submit_request = true;
2767 	bdev_nvme_submit_request(ch1, admin_io);
2768 
2769 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2770 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2771 
2772 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2773 	abort_io->u.abort.bio_to_abort = admin_io;
2774 	abort_io->internal.in_submit_request = true;
2775 
2776 	bdev_nvme_submit_request(ch2, abort_io);
2777 
2778 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2779 	poll_threads();
2780 
2781 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2782 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2783 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2784 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2785 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2786 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2787 
2788 	set_thread(0);
2789 
2790 	/* If a qpair is disconnected, it is freed and then reconnected via resetting
2791 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
2792 	 * while the nvme_ctrlr is resetting.
2793 	 */
2794 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2795 
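	/* A few polls free the disconnected qpair and start resetting the ctrlr. */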
2796 	poll_thread_times(0, 3);
2797 
2798 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2799 	CU_ASSERT(nvme_ctrlr->resetting == true);
2800 
2801 	write_io->internal.in_submit_request = true;
2802 
2803 	bdev_nvme_submit_request(ch1, write_io);
2804 
2805 	CU_ASSERT(write_io->internal.in_submit_request == true);
2806 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2807 
2808 	/* Aborting the queued write request should succeed immediately. */
2809 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2810 	abort_io->u.abort.bio_to_abort = write_io;
2811 	abort_io->internal.in_submit_request = true;
2812 
2813 	bdev_nvme_submit_request(ch1, abort_io);
2814 
2815 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2816 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2817 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2818 	CU_ASSERT(write_io->internal.in_submit_request == false);
2819 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2820 
2821 	poll_threads();
2822 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2823 	poll_threads();
2824 
2825 	spdk_put_io_channel(ch1);
2826 
2827 	set_thread(1);
2828 
2829 	spdk_put_io_channel(ch2);
2830 
2831 	poll_threads();
2832 
2833 	free(write_io);
2834 	free(fuse_io);
2835 	free(admin_io);
2836 	free(abort_io);
2837 
2838 	set_thread(1);
2839 
2840 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2841 	CU_ASSERT(rc == 0);
2842 
2843 	poll_threads();
2844 	spdk_delay_us(1000);
2845 	poll_threads();
2846 
2847 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2848 }
2849 
2850 static void
2851 test_get_io_qpair(void)
2852 {
2853 	struct spdk_nvme_transport_id trid = {};
2854 	struct spdk_nvme_ctrlr ctrlr = {};
2855 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2856 	struct spdk_io_channel *ch;
2857 	struct nvme_ctrlr_channel *ctrlr_ch;
2858 	struct spdk_nvme_qpair *qpair;
2859 	int rc;
2860 
2861 	ut_init_trid(&trid);
2862 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2863 
2864 	set_thread(0);
2865 
2866 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2867 	CU_ASSERT(rc == 0);
2868 
2869 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2870 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2871 
2872 	ch = spdk_get_io_channel(nvme_ctrlr);
2873 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2874 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2875 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2876 
2877 	qpair = bdev_nvme_get_io_qpair(ch);
2878 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2879 
2880 	spdk_put_io_channel(ch);
2881 
2882 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2883 	CU_ASSERT(rc == 0);
2884 
2885 	poll_threads();
2886 	spdk_delay_us(1000);
2887 	poll_threads();
2888 
2889 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2890 }
2891 
2892 /* Test a scenario in which the bdev subsystem starts shutting down while NVMe
2893  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first.
2894  * This test case guards against regressions in that scenario. spdk_bdev_unregister()
2895  * calls bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly.
2896  */
2897 static void
2898 test_bdev_unregister(void)
2899 {
2900 	struct spdk_nvme_transport_id trid = {};
2901 	struct spdk_nvme_ctrlr *ctrlr;
2902 	struct nvme_ctrlr *nvme_ctrlr;
2903 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2904 	const int STRING_SIZE = 32;
2905 	const char *attached_names[STRING_SIZE];
2906 	struct nvme_bdev *bdev1, *bdev2;
2907 	int rc;
2908 
2909 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2910 	ut_init_trid(&trid);
2911 
2912 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2913 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2914 
2915 	g_ut_attach_ctrlr_status = 0;
2916 	g_ut_attach_bdev_count = 2;
2917 
2918 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2919 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2920 	CU_ASSERT(rc == 0);
2921 
2922 	spdk_delay_us(1000);
2923 	poll_threads();
2924 
2925 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2926 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2927 
2928 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2929 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2930 
2931 	bdev1 = nvme_ns1->bdev;
2932 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2933 
2934 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2935 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2936 
2937 	bdev2 = nvme_ns2->bdev;
2938 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2939 
2940 	bdev_nvme_destruct(&bdev1->disk);
2941 	bdev_nvme_destruct(&bdev2->disk);
2942 
2943 	poll_threads();
2944 
2945 	CU_ASSERT(nvme_ns1->bdev == NULL);
2946 	CU_ASSERT(nvme_ns2->bdev == NULL);
2947 
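	/* Mimic the final step of spdk_bdev_unregister() by destructing the
	 * nvme_ctrlr directly.
	 */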
2948 	nvme_ctrlr->destruct = true;
2949 	_nvme_ctrlr_destruct(nvme_ctrlr);
2950 
2951 	poll_threads();
2952 	spdk_delay_us(1000);
2953 	poll_threads();
2954 
2955 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2956 }
2957 
2958 static void
2959 test_compare_ns(void)
2960 {
2961 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2962 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2963 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2964 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
2965 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
2966 
2967 	/* No IDs are defined. */
2968 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2969 
2970 	/* Only EUI64 are defined and not matched. */
2971 	nsdata1.eui64 = 0xABCDEF0123456789;
2972 	nsdata2.eui64 = 0xBBCDEF0123456789;
2973 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2974 
2975 	/* Only EUI64 are defined and matched. */
2976 	nsdata2.eui64 = 0xABCDEF0123456789;
2977 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2978 
2979 	/* Only NGUID are defined and not matched. */
2980 	nsdata1.eui64 = 0x0;
2981 	nsdata2.eui64 = 0x0;
2982 	nsdata1.nguid[0] = 0x12;
2983 	nsdata2.nguid[0] = 0x10;
2984 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2985 
2986 	/* Only NGUID are defined and matched. */
2987 	nsdata2.nguid[0] = 0x12;
2988 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2989 
2990 	/* Only UUID are defined and not matched. */
2991 	nsdata1.nguid[0] = 0x0;
2992 	nsdata2.nguid[0] = 0x0;
2993 	ns1.uuid = &uuid1;
2994 	ns2.uuid = &uuid2;
2995 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2996 
2997 	/* Only one UUID is defined. */
2998 	ns1.uuid = NULL;
2999 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3000 
3001 	/* Only UUID are defined and matched. */
3002 	ns1.uuid = &uuid2;
3003 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3004 
3005 	/* All EUI64, NGUID, and UUID are defined and matched. */
3006 	nsdata1.eui64 = 0x123456789ABCDEF;
3007 	nsdata2.eui64 = 0x123456789ABCDEF;
3008 	nsdata1.nguid[15] = 0x34;
3009 	nsdata2.nguid[15] = 0x34;
3010 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
3011 
3012 	/* CSI are not matched. */
3013 	ns1.csi = SPDK_NVME_CSI_ZNS;
3014 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
3015 }
3016 
3017 static void
3018 test_init_ana_log_page(void)
3019 {
3020 	struct spdk_nvme_transport_id trid = {};
3021 	struct spdk_nvme_ctrlr *ctrlr;
3022 	struct nvme_ctrlr *nvme_ctrlr;
3023 	const int STRING_SIZE = 32;
3024 	const char *attached_names[STRING_SIZE];
3025 	int rc;
3026 
3027 	set_thread(0);
3028 
3029 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3030 	ut_init_trid(&trid);
3031 
3032 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
3033 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3034 
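	/* Give each of the five namespaces a distinct ANA state. */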
3035 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3036 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3037 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3038 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3039 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3040 
3041 	g_ut_attach_ctrlr_status = 0;
3042 	g_ut_attach_bdev_count = 5;
3043 
3044 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3045 			      attach_ctrlr_done, NULL, NULL, NULL, false);
3046 	CU_ASSERT(rc == 0);
3047 
3048 	spdk_delay_us(1000);
3049 	poll_threads();
3050 
3051 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3052 	poll_threads();
3053 
3054 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3055 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3056 
3057 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
3058 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
3059 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
3060 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
3061 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
3062 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3063 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3064 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3065 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
3066 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
3067 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
3068 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
3069 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
3070 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
3071 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
3072 
3073 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3074 	CU_ASSERT(rc == 0);
3075 
3076 	poll_threads();
3077 	spdk_delay_us(1000);
3078 	poll_threads();
3079 
3080 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3081 }
3082 
3083 static void
3084 init_accel(void)
3085 {
3086 	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3087 				sizeof(int), "accel_p");
3088 }
3089 
3090 static void
3091 fini_accel(void)
3092 {
3093 	spdk_io_device_unregister(g_accel_p, NULL);
3094 }
3095 
3096 static void
3097 test_get_memory_domains(void)
3098 {
3099 	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3100 	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3101 	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3102 	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3103 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3104 	struct spdk_memory_domain *domains[4] = {};
3105 	int rc = 0;
3106 
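	/* Start single-path: only ns_1 of ctrlr_1 is attached to the bdev. */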
3107 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3108 
3109 	/* nvme controller doesn't have memory domains */
3110 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3111 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3112 	CU_ASSERT(rc == 0);
3113 	CU_ASSERT(domains[0] == NULL);
3114 	CU_ASSERT(domains[1] == NULL);
3115 
3116 	/* nvme controller has a memory domain */
3117 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3118 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3119 	CU_ASSERT(rc == 1);
3120 	CU_ASSERT(domains[0] != NULL);
3121 	memset(domains, 0, sizeof(domains));
3122 
3123 	/* multipath, 2 controllers report 1 memory domain each */
3124 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3125 
3126 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3127 	CU_ASSERT(rc == 2);
3128 	CU_ASSERT(domains[0] != NULL);
3129 	CU_ASSERT(domains[1] != NULL);
3130 	memset(domains, 0, sizeof(domains));
3131 
3132 	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3133 	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3134 	CU_ASSERT(rc == 2);
3135 
3136 	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3137 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3138 	CU_ASSERT(rc == 2);
3139 	CU_ASSERT(domains[0] == NULL);
3140 	CU_ASSERT(domains[1] == NULL);
3141 
3142 	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3143 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3144 	CU_ASSERT(rc == 2);
3145 	CU_ASSERT(domains[0] != NULL);
3146 	CU_ASSERT(domains[1] == NULL);
3147 	memset(domains, 0, sizeof(domains));
3148 
3149 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3150 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3151 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3152 	CU_ASSERT(rc == 4);
3153 	CU_ASSERT(domains[0] != NULL);
3154 	CU_ASSERT(domains[1] != NULL);
3155 	CU_ASSERT(domains[2] != NULL);
3156 	CU_ASSERT(domains[3] != NULL);
3157 	memset(domains, 0, sizeof(domains));
3158 
3159 	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test).
3160 	 * Array size is less than the number of memory domains. */
3161 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3162 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3163 	CU_ASSERT(rc == 4);
3164 	CU_ASSERT(domains[0] != NULL);
3165 	CU_ASSERT(domains[1] != NULL);
3166 	CU_ASSERT(domains[2] != NULL);
3167 	CU_ASSERT(domains[3] == NULL);
3168 	memset(domains, 0, sizeof(domains));
3169 
3170 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3171 }
3172 
3173 static void
3174 test_reconnect_qpair(void)
3175 {
3176 	struct spdk_nvme_transport_id trid = {};
3177 	struct spdk_nvme_ctrlr *ctrlr;
3178 	struct nvme_ctrlr *nvme_ctrlr;
3179 	const int STRING_SIZE = 32;
3180 	const char *attached_names[STRING_SIZE];
3181 	struct nvme_bdev *bdev;
3182 	struct spdk_io_channel *ch1, *ch2;
3183 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3184 	struct nvme_io_path *io_path1, *io_path2;
3185 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3186 	int rc;
3187 
3188 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3189 	ut_init_trid(&trid);
3190 
3191 	set_thread(0);
3192 
3193 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3194 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3195 
3196 	g_ut_attach_ctrlr_status = 0;
3197 	g_ut_attach_bdev_count = 1;
3198 
3199 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3200 			      attach_ctrlr_done, NULL, NULL, NULL, false);
3201 	CU_ASSERT(rc == 0);
3202 
3203 	spdk_delay_us(1000);
3204 	poll_threads();
3205 
3206 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3207 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3208 
3209 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3210 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3211 
3212 	ch1 = spdk_get_io_channel(bdev);
3213 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3214 
3215 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3216 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3217 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3218 	nvme_qpair1 = io_path1->qpair;
3219 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3220 
3221 	set_thread(1);
3222 
3223 	ch2 = spdk_get_io_channel(bdev);
3224 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3225 
3226 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3227 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3228 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3229 	nvme_qpair2 = io_path2->qpair;
3230 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3231 
3232 	/* If a qpair is disconnected, it is freed and then reconnected via
3233 	 * resetting the corresponding nvme_ctrlr.
3234 	 */
3235 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3236 	ctrlr->is_failed = true;
3237 
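	/* Step the pollers one at a time to walk deterministically through the
	 * qpair-disconnect, ctrlr-reset, and reconnect sequence.
	 */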
3238 	poll_thread_times(1, 3);
3239 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3240 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3241 	CU_ASSERT(nvme_ctrlr->resetting == true);
3242 
3243 	poll_thread_times(0, 3);
3244 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3245 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3246 	CU_ASSERT(ctrlr->is_failed == true);
3247 
3248 	poll_thread_times(1, 2);
3249 	poll_thread_times(0, 1);
3250 	CU_ASSERT(ctrlr->is_failed == false);
3251 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3252 
3253 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3254 	poll_thread_times(0, 2);
3255 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3256 
3257 	poll_thread_times(0, 1);
3258 	poll_thread_times(1, 1);
3259 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3260 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3261 	CU_ASSERT(nvme_ctrlr->resetting == true);
3262 
3263 	poll_thread_times(0, 2);
3264 	poll_thread_times(1, 1);
3265 	poll_thread_times(0, 1);
3266 	CU_ASSERT(nvme_ctrlr->resetting == false);
3267 
3268 	poll_threads();
3269 
3270 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3271 	 * fails, the qpair is just freed.
3272 	 */
3273 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3274 	ctrlr->is_failed = true;
3275 	ctrlr->fail_reset = true;
3276 
3277 	poll_thread_times(1, 3);
3278 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3279 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3280 	CU_ASSERT(nvme_ctrlr->resetting == true);
3281 
3282 	poll_thread_times(0, 3);
3283 	poll_thread_times(1, 1);
3284 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3285 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3286 	CU_ASSERT(ctrlr->is_failed == true);
3287 
3288 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3289 	poll_thread_times(0, 3);
3290 	poll_thread_times(1, 1);
3291 	poll_thread_times(0, 1);
3292 	CU_ASSERT(ctrlr->is_failed == true);
3293 	CU_ASSERT(nvme_ctrlr->resetting == false);
3294 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3295 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3296 
3297 	poll_threads();
3298 
3299 	spdk_put_io_channel(ch2);
3300 
3301 	set_thread(0);
3302 
3303 	spdk_put_io_channel(ch1);
3304 
3305 	poll_threads();
3306 
3307 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3308 	CU_ASSERT(rc == 0);
3309 
3310 	poll_threads();
3311 	spdk_delay_us(1000);
3312 	poll_threads();
3313 
3314 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3315 }
3316 
3317 static void
3318 test_create_bdev_ctrlr(void)
3319 {
3320 	struct nvme_path_id path1 = {}, path2 = {};
3321 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3322 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3323 	const int STRING_SIZE = 32;
3324 	const char *attached_names[STRING_SIZE];
3325 	int rc;
3326 
3327 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3328 	ut_init_trid(&path1.trid);
3329 	ut_init_trid2(&path2.trid);
3330 
3331 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3332 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3333 
3334 	g_ut_attach_ctrlr_status = 0;
3335 	g_ut_attach_bdev_count = 0;
3336 
3337 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3338 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3339 	CU_ASSERT(rc == 0);
3340 
3341 	spdk_delay_us(1000);
3342 	poll_threads();
3343 
3344 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3345 	poll_threads();
3346 
3347 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3348 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3349 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3350 
3351 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3352 	g_ut_attach_ctrlr_status = -EINVAL;
3353 
3354 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3355 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3356 
3357 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3358 
3359 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3360 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3361 	CU_ASSERT(rc == 0);
3362 
3363 	spdk_delay_us(1000);
3364 	poll_threads();
3365 
3366 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3367 	poll_threads();
3368 
3369 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3370 
3371 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3372 	g_ut_attach_ctrlr_status = 0;
3373 
3374 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3375 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3376 
3377 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3378 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3379 	CU_ASSERT(rc == 0);
3380 
3381 	spdk_delay_us(1000);
3382 	poll_threads();
3383 
3384 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3385 	poll_threads();
3386 
3387 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3388 
3389 	/* Delete two ctrlrs at once. */
3390 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3391 	CU_ASSERT(rc == 0);
3392 
3393 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3394 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3395 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3396 
3397 	poll_threads();
3398 	spdk_delay_us(1000);
3399 	poll_threads();
3400 
3401 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3402 
3403 	/* Add two ctrlrs and delete one by one. */
3404 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3405 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3406 
3407 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3408 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3409 
3410 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3411 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3412 	CU_ASSERT(rc == 0);
3413 
3414 	spdk_delay_us(1000);
3415 	poll_threads();
3416 
3417 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3418 	poll_threads();
3419 
3420 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3421 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3422 	CU_ASSERT(rc == 0);
3423 
3424 	spdk_delay_us(1000);
3425 	poll_threads();
3426 
3427 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3428 	poll_threads();
3429 
3430 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3431 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3432 
3433 	rc = bdev_nvme_delete("nvme0", &path1);
3434 	CU_ASSERT(rc == 0);
3435 
3436 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3437 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3438 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3439 
3440 	poll_threads();
3441 	spdk_delay_us(1000);
3442 	poll_threads();
3443 
3444 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3445 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3446 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3447 
3448 	rc = bdev_nvme_delete("nvme0", &path2);
3449 	CU_ASSERT(rc == 0);
3450 
3451 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3452 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3453 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3454 
3455 	poll_threads();
3456 	spdk_delay_us(1000);
3457 	poll_threads();
3458 
3459 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3460 }
3461 
3462 static struct nvme_ns *
3463 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3464 {
3465 	struct nvme_ns *nvme_ns;
3466 
3467 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3468 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3469 			return nvme_ns;
3470 		}
3471 	}
3472 
3473 	return NULL;
3474 }
3475 
3476 static void
3477 test_add_multi_ns_to_bdev(void)
3478 {
3479 	struct nvme_path_id path1 = {}, path2 = {};
3480 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3481 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3482 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3483 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3484 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3485 	const int STRING_SIZE = 32;
3486 	const char *attached_names[STRING_SIZE];
3487 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3488 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3489 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3490 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3491 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3492 	int rc;
3493 
3494 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3495 	ut_init_trid(&path1.trid);
3496 	ut_init_trid2(&path2.trid);
3497 
3498 	/* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */
3499 
3500 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st,
3501 	 * 3rd, and 4th namespaces are populated.
3502 	 */
3503 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3504 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3505 
3506 	ctrlr1->ns[1].is_active = false;
3507 	ctrlr1->ns[4].is_active = false;
3508 	ctrlr1->ns[0].uuid = &uuid1;
3509 	ctrlr1->ns[2].uuid = &uuid3;
3510 	ctrlr1->ns[3].uuid = &uuid4;
3511 
3512 	g_ut_attach_ctrlr_status = 0;
3513 	g_ut_attach_bdev_count = 3;
3514 
3515 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3516 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3517 	CU_ASSERT(rc == 0);
3518 
3519 	spdk_delay_us(1000);
3520 	poll_threads();
3521 
3522 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3523 	poll_threads();
3524 
3525 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st,
3526 	 * 2nd, and 4th namespaces are populated. The uuid of the 4th namespace is
3527 	 * different, and hence adding the 4th namespace to a bdev should fail.
3528 	 */
3529 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3530 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3531 
3532 	ctrlr2->ns[2].is_active = false;
3533 	ctrlr2->ns[4].is_active = false;
3534 	ctrlr2->ns[0].uuid = &uuid1;
3535 	ctrlr2->ns[1].uuid = &uuid2;
3536 	ctrlr2->ns[3].uuid = &uuid44;
3537 
3538 	g_ut_attach_ctrlr_status = 0;
3539 	g_ut_attach_bdev_count = 2;
3540 
3541 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3542 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3543 	CU_ASSERT(rc == 0);
3544 
3545 	spdk_delay_us(1000);
3546 	poll_threads();
3547 
3548 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3549 	poll_threads();
3550 
3551 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3552 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3553 
3554 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3555 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3556 
3557 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3558 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3559 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3560 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3561 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3562 
3563 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3564 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3565 
3566 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3567 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3568 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3569 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3570 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3571 
3572 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3573 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3574 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3575 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3576 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3577 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3578 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3579 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3580 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3581 
3582 	CU_ASSERT(bdev1->ref == 2);
3583 	CU_ASSERT(bdev2->ref == 1);
3584 	CU_ASSERT(bdev3->ref == 1);
3585 	CU_ASSERT(bdev4->ref == 1);
3586 
3587 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3588 	rc = bdev_nvme_delete("nvme0", &path1);
3589 	CU_ASSERT(rc == 0);
3590 
3591 	poll_threads();
3592 	spdk_delay_us(1000);
3593 	poll_threads();
3594 
3595 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3596 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3597 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3598 
3599 	rc = bdev_nvme_delete("nvme0", &path2);
3600 	CU_ASSERT(rc == 0);
3601 
3602 	poll_threads();
3603 	spdk_delay_us(1000);
3604 	poll_threads();
3605 
3606 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3607 
3608 	/* Test if a nvme_bdev which has a shared namespace between two ctrlrs
3609 	 * can be deleted when the bdev subsystem shuts down.
3610 	 */
3611 	g_ut_attach_bdev_count = 1;
3612 
3613 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3614 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3615 
3616 	ctrlr1->ns[0].uuid = &uuid1;
3617 
3618 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3619 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3620 	CU_ASSERT(rc == 0);
3621 
3622 	spdk_delay_us(1000);
3623 	poll_threads();
3624 
3625 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3626 	poll_threads();
3627 
3628 	ut_init_trid2(&path2.trid);
3629 
3630 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3631 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3632 
3633 	ctrlr2->ns[0].uuid = &uuid1;
3634 
3635 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3636 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3637 	CU_ASSERT(rc == 0);
3638 
3639 	spdk_delay_us(1000);
3640 	poll_threads();
3641 
3642 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3643 	poll_threads();
3644 
3645 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3646 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3647 
3648 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3649 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3650 
3651 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3652 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3653 
3654 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3655 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3656 
3657 	/* Check that the nvme_bdev has two nvme_ns, one from each ctrlr. */
3658 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3659 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3660 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3661 
3662 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3663 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3664 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3665 
3666 	/* Delete the nvme_bdev first, as is done when the bdev subsystem shuts down. */
3667 	bdev_nvme_destruct(&bdev1->disk);
3668 
3669 	poll_threads();
3670 
3671 	CU_ASSERT(nvme_ns1->bdev == NULL);
3672 	CU_ASSERT(nvme_ns2->bdev == NULL);
3673 
3674 	nvme_ctrlr1->destruct = true;
3675 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3676 
3677 	poll_threads();
3678 	spdk_delay_us(1000);
3679 	poll_threads();
3680 
3681 	nvme_ctrlr2->destruct = true;
3682 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3683 
3684 	poll_threads();
3685 	spdk_delay_us(1000);
3686 	poll_threads();
3687 
3688 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3689 }
3690 
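/* Verify that a nvme_bdev_channel holds one I/O path per ctrlr exposing the
 * shared namespace, and that I/O paths are added to and deleted from the
 * channel dynamically as ctrlrs are attached and deleted while the channel
 * is held open.
 */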
3691 static void
3692 test_add_multi_io_paths_to_nbdev_ch(void)
3693 {
3694 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3695 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3696 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3697 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3698 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3699 	const int STRING_SIZE = 32;
3700 	const char *attached_names[STRING_SIZE];
3701 	struct nvme_bdev *bdev;
3702 	struct spdk_io_channel *ch;
3703 	struct nvme_bdev_channel *nbdev_ch;
3704 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3705 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3706 	int rc;
3707 
3708 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3709 	ut_init_trid(&path1.trid);
3710 	ut_init_trid2(&path2.trid);
3711 	ut_init_trid3(&path3.trid);
3712 	g_ut_attach_ctrlr_status = 0;
3713 	g_ut_attach_bdev_count = 1;
3714 
3715 	set_thread(1);
3716 
3717 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3718 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3719 
3720 	ctrlr1->ns[0].uuid = &uuid1;
3721 
3722 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3723 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3724 	CU_ASSERT(rc == 0);
3725 
3726 	spdk_delay_us(1000);
3727 	poll_threads();
3728 
3729 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3730 	poll_threads();
3731 
3732 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3733 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3734 
3735 	ctrlr2->ns[0].uuid = &uuid1;
3736 
3737 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3738 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3739 	CU_ASSERT(rc == 0);
3740 
3741 	spdk_delay_us(1000);
3742 	poll_threads();
3743 
3744 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3745 	poll_threads();
3746 
3747 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3748 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3749 
3750 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3751 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3752 
3753 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3754 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3755 
3756 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3757 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3758 
3759 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3760 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3761 
3762 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3763 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3764 
3765 	set_thread(0);
3766 
3767 	ch = spdk_get_io_channel(bdev);
3768 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3769 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3770 
3771 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3772 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3773 
3774 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3775 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3776 
3777 	set_thread(1);
3778 
3779 	/* Check if an I/O path is dynamically added to the nvme_bdev_channel. */
3780 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3781 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3782 
3783 	ctrlr3->ns[0].uuid = &uuid1;
3784 
3785 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3786 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3787 	CU_ASSERT(rc == 0);
3788 
3789 	spdk_delay_us(1000);
3790 	poll_threads();
3791 
3792 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3793 	poll_threads();
3794 
3795 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3796 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3797 
3798 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3799 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3800 
3801 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3802 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3803 
3804 	/* Check if an I/O path is dynamically deleted from the nvme_bdev_channel. */
3805 	rc = bdev_nvme_delete("nvme0", &path2);
3806 	CU_ASSERT(rc == 0);
3807 
3808 	poll_threads();
3809 	spdk_delay_us(1000);
3810 	poll_threads();
3811 
3812 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3813 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3814 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3815 
3816 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3817 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3818 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3819 
3820 	set_thread(0);
3821 
3822 	spdk_put_io_channel(ch);
3823 
3824 	poll_threads();
3825 
3826 	set_thread(1);
3827 
3828 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3829 	CU_ASSERT(rc == 0);
3830 
3831 	poll_threads();
3832 	spdk_delay_us(1000);
3833 	poll_threads();
3834 
3835 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3836 }
3837 
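/* Verify failover of admin commands: an admin passthrough request should be
 * submitted to any ctrlr that is not failed, and should fail only when every
 * ctrlr of the nvme_bdev_ctrlr is failed.
 */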
3838 static void
3839 test_admin_path(void)
3840 {
3841 	struct nvme_path_id path1 = {}, path2 = {};
3842 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3843 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3844 	const int STRING_SIZE = 32;
3845 	const char *attached_names[STRING_SIZE];
3846 	struct nvme_bdev *bdev;
3847 	struct spdk_io_channel *ch;
3848 	struct spdk_bdev_io *bdev_io;
3849 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3850 	int rc;
3851 
3852 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3853 	ut_init_trid(&path1.trid);
3854 	ut_init_trid2(&path2.trid);
3855 	g_ut_attach_ctrlr_status = 0;
3856 	g_ut_attach_bdev_count = 1;
3857 
3858 	set_thread(0);
3859 
3860 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3861 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3862 
3863 	ctrlr1->ns[0].uuid = &uuid1;
3864 
3865 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3866 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3867 	CU_ASSERT(rc == 0);
3868 
3869 	spdk_delay_us(1000);
3870 	poll_threads();
3871 
3872 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3873 	poll_threads();
3874 
3875 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3876 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3877 
3878 	ctrlr2->ns[0].uuid = &uuid1;
3879 
3880 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3881 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3882 	CU_ASSERT(rc == 0);
3883 
3884 	spdk_delay_us(1000);
3885 	poll_threads();
3886 
3887 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3888 	poll_threads();
3889 
3890 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3891 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3892 
3893 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3894 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3895 
3896 	ch = spdk_get_io_channel(bdev);
3897 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3898 
3899 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3900 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3901 
3902 	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
3903 	 * submitted to ctrlr2.
3904 	 */
3905 	ctrlr1->is_failed = true;
3906 	bdev_io->internal.in_submit_request = true;
3907 
3908 	bdev_nvme_submit_request(ch, bdev_io);
3909 
3910 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3911 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3912 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3913 
3914 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3915 	poll_threads();
3916 
3917 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3918 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3919 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3920 
3921 	/* Both ctrlr1 and ctrlr2 are failed. Submitting the admin command should fail. */
3922 	ctrlr2->is_failed = true;
3923 	bdev_io->internal.in_submit_request = true;
3924 
3925 	bdev_nvme_submit_request(ch, bdev_io);
3926 
3927 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3928 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3929 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3930 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3931 
3932 	free(bdev_io);
3933 
3934 	spdk_put_io_channel(ch);
3935 
3936 	poll_threads();
3937 
3938 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3939 	CU_ASSERT(rc == 0);
3940 
3941 	poll_threads();
3942 	spdk_delay_us(1000);
3943 	poll_threads();
3944 
3945 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3946 }
3947 
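/* Test helper: find the nvme_io_path in the channel whose qpair belongs to
 * the given nvme_ctrlr, or NULL if the channel has no path to that ctrlr.
 */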
3948 static struct nvme_io_path *
3949 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3950 			struct nvme_ctrlr *nvme_ctrlr)
3951 {
3952 	struct nvme_io_path *io_path;
3953 
3954 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3955 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
3956 			return io_path;
3957 		}
3958 	}
3959 
3960 	return NULL;
3961 }
3962 
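/* Verify that a single reset bdev_io resets every ctrlr of the
 * nvme_bdev_ctrlr in sequence, and that a second reset submitted while the
 * first is in progress is queued as a pending reset on each ctrlr and still
 * completes successfully.
 */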
3963 static void
3964 test_reset_bdev_ctrlr(void)
3965 {
3966 	struct nvme_path_id path1 = {}, path2 = {};
3967 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3968 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3969 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3970 	struct nvme_path_id *curr_path1, *curr_path2;
3971 	const int STRING_SIZE = 32;
3972 	const char *attached_names[STRING_SIZE];
3973 	struct nvme_bdev *bdev;
3974 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
3975 	struct nvme_bdev_io *first_bio;
3976 	struct spdk_io_channel *ch1, *ch2;
3977 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3978 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
3979 	int rc;
3980 
3981 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3982 	ut_init_trid(&path1.trid);
3983 	ut_init_trid2(&path2.trid);
3984 	g_ut_attach_ctrlr_status = 0;
3985 	g_ut_attach_bdev_count = 1;
3986 
3987 	set_thread(0);
3988 
3989 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3990 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3991 
3992 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3993 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3994 	CU_ASSERT(rc == 0);
3995 
3996 	spdk_delay_us(1000);
3997 	poll_threads();
3998 
3999 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4000 	poll_threads();
4001 
4002 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4003 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4004 
4005 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4006 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4007 	CU_ASSERT(rc == 0);
4008 
4009 	spdk_delay_us(1000);
4010 	poll_threads();
4011 
4012 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4013 	poll_threads();
4014 
4015 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4016 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4017 
4018 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4019 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
4020 
4021 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
4022 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
4023 
4024 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4025 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
4026 
4027 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
4028 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
4029 
4030 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4031 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
4032 
4033 	set_thread(0);
4034 
4035 	ch1 = spdk_get_io_channel(bdev);
4036 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
4037 
4038 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
4039 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
4040 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
4041 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
4042 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
4043 
4044 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
4045 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
4046 
4047 	set_thread(1);
4048 
4049 	ch2 = spdk_get_io_channel(bdev);
4050 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
4051 
4052 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
4053 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
4054 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
4055 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
4056 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
4057 
4058 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
4059 
4060 	/* The first reset request from bdev_io is submitted on thread 0.
4061 	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
4062 	 *
4063 	 * A few extra polls are necessary after resetting ctrlr1 to check
4064 	 * pending reset requests for ctrlr1.
4065 	 */
4066 	ctrlr1->is_failed = true;
4067 	curr_path1->last_failed_tsc = spdk_get_ticks();
4068 	ctrlr2->is_failed = true;
4069 	curr_path2->last_failed_tsc = spdk_get_ticks();
4070 
4071 	set_thread(0);
4072 
4073 	bdev_nvme_submit_request(ch1, first_bdev_io);
4074 	CU_ASSERT(first_bio->io_path == io_path11);
4075 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4076 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4077 
4078 	poll_thread_times(0, 3);
4079 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4080 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4081 
4082 	poll_thread_times(1, 2);
4083 	CU_ASSERT(io_path11->qpair->qpair == NULL);
4084 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4085 	CU_ASSERT(ctrlr1->is_failed == true);
4086 
4087 	poll_thread_times(0, 1);
4088 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4089 	CU_ASSERT(ctrlr1->is_failed == false);
4090 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4091 	CU_ASSERT(curr_path1->last_failed_tsc != 0);
4092 
4093 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4094 	poll_thread_times(0, 2);
4095 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4096 
4097 	poll_thread_times(0, 1);
4098 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4099 	CU_ASSERT(io_path21->qpair->qpair == NULL);
4100 
4101 	poll_thread_times(1, 1);
4102 	CU_ASSERT(io_path11->qpair->qpair != NULL);
4103 	CU_ASSERT(io_path21->qpair->qpair != NULL);
4104 
4105 	poll_thread_times(0, 2);
4106 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4107 	poll_thread_times(1, 1);
4108 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4109 	poll_thread_times(0, 2);
4110 	CU_ASSERT(nvme_ctrlr1->resetting == false);
4111 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4112 	CU_ASSERT(first_bio->io_path == io_path12);
4113 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4114 
4115 	poll_thread_times(0, 3);
4116 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4117 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4118 
4119 	poll_thread_times(1, 2);
4120 	CU_ASSERT(io_path12->qpair->qpair == NULL);
4121 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4122 	CU_ASSERT(ctrlr2->is_failed == true);
4123 
4124 	poll_thread_times(0, 1);
4125 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4126 	CU_ASSERT(ctrlr2->is_failed == false);
4127 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4128 	CU_ASSERT(curr_path2->last_failed_tsc != 0);
4129 
4130 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4131 	poll_thread_times(0, 2);
4132 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4133 
4134 	poll_thread_times(0, 1);
4135 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4136 	CU_ASSERT(io_path22->qpair->qpair == NULL);
4137 
4138 	poll_thread_times(1, 2);
4139 	CU_ASSERT(io_path12->qpair->qpair != NULL);
4140 	CU_ASSERT(io_path22->qpair->qpair != NULL);
4141 
4142 	poll_thread_times(0, 2);
4143 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4144 	poll_thread_times(1, 1);
4145 	CU_ASSERT(nvme_ctrlr2->resetting == true);
4146 	poll_thread_times(0, 2);
4147 	CU_ASSERT(first_bio->io_path == NULL);
4148 	CU_ASSERT(nvme_ctrlr2->resetting == false);
4149 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4150 
4151 	poll_threads();
4152 
4153 	/* There is a race between two reset requests from bdev_io.
4154 	 *
4155 	 * The first reset request is submitted on thread 0, and the second reset
4156 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4157 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
4158 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
4159 	 * The second is pending on ctrlr2 again. After the first completes resetting
4160 	 * ctrlr2, both complete successfully.
4161 	 */
4162 	ctrlr1->is_failed = true;
4163 	curr_path1->last_failed_tsc = spdk_get_ticks();
4164 	ctrlr2->is_failed = true;
4165 	curr_path2->last_failed_tsc = spdk_get_ticks();
4166 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4167 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4168 
4169 	set_thread(0);
4170 
4171 	bdev_nvme_submit_request(ch1, first_bdev_io);
4172 
4173 	set_thread(1);
4174 
4175 	bdev_nvme_submit_request(ch2, second_bdev_io);
4176 
4177 	CU_ASSERT(nvme_ctrlr1->resetting == true);
4178 	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
4179 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);
4180 
4181 	poll_threads();
4182 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4183 	poll_threads();
4184 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4185 	poll_threads();
4186 
4187 	CU_ASSERT(ctrlr1->is_failed == false);
4188 	CU_ASSERT(curr_path1->last_failed_tsc == 0);
4189 	CU_ASSERT(ctrlr2->is_failed == false);
4190 	CU_ASSERT(curr_path2->last_failed_tsc == 0);
4191 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4192 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4193 
4194 	set_thread(0);
4195 
4196 	spdk_put_io_channel(ch1);
4197 
4198 	set_thread(1);
4199 
4200 	spdk_put_io_channel(ch2);
4201 
4202 	poll_threads();
4203 
4204 	set_thread(0);
4205 
4206 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4207 	CU_ASSERT(rc == 0);
4208 
4209 	poll_threads();
4210 	spdk_delay_us(1000);
4211 	poll_threads();
4212 
4213 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4214 
4215 	free(first_bdev_io);
4216 	free(second_bdev_io);
4217 }
4218 
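/* Exercise bdev_nvme_find_io_path() directly on stack-allocated I/O paths:
 * paths whose ANA state is not accessible or whose qpair is being reset must
 * be excluded, and an ANA optimized path is preferred to a non-optimized one.
 */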
4219 static void
4220 test_find_io_path(void)
4221 {
4222 	struct nvme_bdev_channel nbdev_ch = {
4223 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4224 	};
4225 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4226 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4227 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4228 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4229 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4230 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4231 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
4232 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4233 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4234 
4235 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4236 
4237 	/* Test if an io_path whose ANA state is not accessible is excluded. */
4238 
4239 	nvme_qpair1.qpair = &qpair1;
4240 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4241 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4242 
4243 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4244 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4245 
4246 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4247 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4248 
4249 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4250 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4251 
4252 	nbdev_ch.current_io_path = NULL;
4253 
4254 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4255 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4256 
4257 	nbdev_ch.current_io_path = NULL;
4258 
4259 	/* Test if an io_path whose qpair is resetting is excluded. */
4260 
4261 	nvme_qpair1.qpair = NULL;
4262 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4263 
4264 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4265 
4266 	/* Test that an io_path in ANA optimized state, or failing that the first
4267 	 * io_path found in ANA non-optimized state, is prioritized.
4268 	 */
4269 
4270 	nvme_qpair1.qpair = &qpair1;
4271 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4272 	nvme_qpair2.qpair = &qpair2;
4273 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4274 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4275 
4276 	nbdev_ch.current_io_path = NULL;
4277 
4278 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4279 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4280 
4281 	nbdev_ch.current_io_path = NULL;
4282 }
4283 
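/* Verify that an I/O submitted while the namespace's ANA state is
 * inaccessible is queued on the channel's retry list and resubmitted once
 * the ANA state becomes accessible again.
 */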
4284 static void
4285 test_retry_io_if_ana_state_is_updating(void)
4286 {
4287 	struct nvme_path_id path = {};
4288 	struct nvme_ctrlr_opts opts = {};
4289 	struct spdk_nvme_ctrlr *ctrlr;
4290 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4291 	struct nvme_ctrlr *nvme_ctrlr;
4292 	const int STRING_SIZE = 32;
4293 	const char *attached_names[STRING_SIZE];
4294 	struct nvme_bdev *bdev;
4295 	struct nvme_ns *nvme_ns;
4296 	struct spdk_bdev_io *bdev_io1;
4297 	struct spdk_io_channel *ch;
4298 	struct nvme_bdev_channel *nbdev_ch;
4299 	struct nvme_io_path *io_path;
4300 	struct nvme_qpair *nvme_qpair;
4301 	int rc;
4302 
4303 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4304 	ut_init_trid(&path.trid);
4305 
4306 	set_thread(0);
4307 
4308 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4309 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4310 
4311 	g_ut_attach_ctrlr_status = 0;
4312 	g_ut_attach_bdev_count = 1;
4313 
4314 	opts.ctrlr_loss_timeout_sec = -1;
4315 	opts.reconnect_delay_sec = 1;
4316 
4317 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4318 			      attach_ctrlr_done, NULL, NULL, &opts, false);
4319 	CU_ASSERT(rc == 0);
4320 
4321 	spdk_delay_us(1000);
4322 	poll_threads();
4323 
4324 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4325 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4326 
4327 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4328 	CU_ASSERT(nvme_ctrlr != NULL);
4329 
4330 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4331 	CU_ASSERT(bdev != NULL);
4332 
4333 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4334 	CU_ASSERT(nvme_ns != NULL);
4335 
4336 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4337 	ut_bdev_io_set_buf(bdev_io1);
4338 
4339 	ch = spdk_get_io_channel(bdev);
4340 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4341 
4342 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4343 
4344 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4345 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4346 
4347 	nvme_qpair = io_path->qpair;
4348 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4349 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4350 
4351 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4352 
4353 	/* If qpair is connected, I/O should succeed. */
4354 	bdev_io1->internal.in_submit_request = true;
4355 
4356 	bdev_nvme_submit_request(ch, bdev_io1);
4357 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4358 
4359 	poll_threads();
4360 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4361 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4362 
4363 	/* If ANA state of namespace is inaccessible, I/O should be queued. */
4364 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4365 	nbdev_ch->current_io_path = NULL;
4366 
4367 	bdev_io1->internal.in_submit_request = true;
4368 
4369 	bdev_nvme_submit_request(ch, bdev_io1);
4370 
4371 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4372 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4373 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4374 
4375 	/* ANA state became accessible while I/O was queued. */
4376 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4377 
4378 	spdk_delay_us(1000000);
4379 
4380 	poll_thread_times(0, 1);
4381 
4382 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4383 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4384 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4385 
4386 	poll_threads();
4387 
4388 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4389 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4390 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4391 
4392 	free(bdev_io1);
4393 
4394 	spdk_put_io_channel(ch);
4395 
4396 	poll_threads();
4397 
4398 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4399 	CU_ASSERT(rc == 0);
4400 
4401 	poll_threads();
4402 	spdk_delay_us(1000);
4403 	poll_threads();
4404 
4405 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4406 }
4407 
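/* Verify retries on I/O path errors: a path error with DNR set fails the I/O
 * immediately, a transient path error is retried on the same path, and an
 * abort caused by SQ deletion is retried on another available path of a
 * multipath configuration.
 */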
4408 static void
4409 test_retry_io_for_io_path_error(void)
4410 {
4411 	struct nvme_path_id path1 = {}, path2 = {};
4412 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4413 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4414 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4415 	const int STRING_SIZE = 32;
4416 	const char *attached_names[STRING_SIZE];
4417 	struct nvme_bdev *bdev;
4418 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4419 	struct spdk_bdev_io *bdev_io;
4420 	struct nvme_bdev_io *bio;
4421 	struct spdk_io_channel *ch;
4422 	struct nvme_bdev_channel *nbdev_ch;
4423 	struct nvme_io_path *io_path1, *io_path2;
4424 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4425 	struct ut_nvme_req *req;
4426 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4427 	int rc;
4428 
4429 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4430 	ut_init_trid(&path1.trid);
4431 	ut_init_trid2(&path2.trid);
4432 
4433 	g_opts.bdev_retry_count = 1;
4434 
4435 	set_thread(0);
4436 
4437 	g_ut_attach_ctrlr_status = 0;
4438 	g_ut_attach_bdev_count = 1;
4439 
4440 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4441 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4442 
4443 	ctrlr1->ns[0].uuid = &uuid1;
4444 
4445 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4446 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4447 	CU_ASSERT(rc == 0);
4448 
4449 	spdk_delay_us(1000);
4450 	poll_threads();
4451 
4452 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4453 	poll_threads();
4454 
4455 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4456 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4457 
4458 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4459 	CU_ASSERT(nvme_ctrlr1 != NULL);
4460 
4461 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4462 	CU_ASSERT(bdev != NULL);
4463 
4464 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4465 	CU_ASSERT(nvme_ns1 != NULL);
4466 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4467 
4468 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4469 	ut_bdev_io_set_buf(bdev_io);
4470 
4471 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4472 
4473 	ch = spdk_get_io_channel(bdev);
4474 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4475 
4476 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4477 
4478 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4479 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4480 
4481 	nvme_qpair1 = io_path1->qpair;
4482 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4483 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4484 
4485 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4486 
4487 	/* I/O got a temporary I/O path error, but it should not be retried if DNR is set. */
4488 	bdev_io->internal.in_submit_request = true;
4489 
4490 	bdev_nvme_submit_request(ch, bdev_io);
4491 
4492 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4493 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4494 
4495 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4496 	SPDK_CU_ASSERT_FATAL(req != NULL);
4497 
4498 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4499 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4500 	req->cpl.status.dnr = 1;
4501 
4502 	poll_thread_times(0, 1);
4503 
4504 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4505 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4506 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4507 
4508 	/* I/O got a temporary I/O path error, but it should succeed after a retry. */
4509 	bdev_io->internal.in_submit_request = true;
4510 
4511 	bdev_nvme_submit_request(ch, bdev_io);
4512 
4513 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4514 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4515 
4516 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4517 	SPDK_CU_ASSERT_FATAL(req != NULL);
4518 
4519 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4520 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4521 
4522 	poll_thread_times(0, 1);
4523 
4524 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4525 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4526 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4527 
4528 	poll_threads();
4529 
4530 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4531 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4532 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4533 
4534 	/* Add io_path2 dynamically, and create a multipath configuration. */
4535 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4536 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4537 
4538 	ctrlr2->ns[0].uuid = &uuid1;
4539 
4540 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4541 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4542 	CU_ASSERT(rc == 0);
4543 
4544 	spdk_delay_us(1000);
4545 	poll_threads();
4546 
4547 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4548 	poll_threads();
4549 
4550 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4551 	CU_ASSERT(nvme_ctrlr2 != NULL);
4552 
4553 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4554 	CU_ASSERT(nvme_ns2 != NULL);
4555 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4556 
4557 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4558 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4559 
4560 	nvme_qpair2 = io_path2->qpair;
4561 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4562 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4563 
4564 	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4565 	 * and deleted, and hence the I/O is aborted. However, io_path2 is available,
4566 	 * so after a retry the I/O is submitted to io_path2 and should succeed.
4567 	 */
4568 	bdev_io->internal.in_submit_request = true;
4569 
4570 	bdev_nvme_submit_request(ch, bdev_io);
4571 
4572 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4573 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4574 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4575 
4576 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4577 	SPDK_CU_ASSERT_FATAL(req != NULL);
4578 
4579 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4580 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4581 
4582 	poll_thread_times(0, 1);
4583 
4584 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4585 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4586 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4587 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4588 
4589 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4590 	nvme_qpair1->qpair = NULL;
4591 
4592 	poll_threads();
4593 
4594 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4595 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4596 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4597 
4598 	free(bdev_io);
4599 
4600 	spdk_put_io_channel(ch);
4601 
4602 	poll_threads();
4603 
4604 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4605 	CU_ASSERT(rc == 0);
4606 
4607 	poll_threads();
4608 	spdk_delay_us(1000);
4609 	poll_threads();
4610 
4611 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4612 
4613 	g_opts.bdev_retry_count = 0;
4614 }
4615 
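/* Verify how g_opts.bdev_retry_count bounds retries: an I/O aborted by
 * request is never retried, an I/O whose retry_count has reached the limit
 * fails, -1 means retry without bound, and an I/O whose retry_count is below
 * the limit is retried.
 */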
4616 static void
4617 test_retry_io_count(void)
4618 {
4619 	struct nvme_path_id path = {};
4620 	struct spdk_nvme_ctrlr *ctrlr;
4621 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4622 	struct nvme_ctrlr *nvme_ctrlr;
4623 	const int STRING_SIZE = 32;
4624 	const char *attached_names[STRING_SIZE];
4625 	struct nvme_bdev *bdev;
4626 	struct nvme_ns *nvme_ns;
4627 	struct spdk_bdev_io *bdev_io;
4628 	struct nvme_bdev_io *bio;
4629 	struct spdk_io_channel *ch;
4630 	struct nvme_bdev_channel *nbdev_ch;
4631 	struct nvme_io_path *io_path;
4632 	struct nvme_qpair *nvme_qpair;
4633 	struct ut_nvme_req *req;
4634 	int rc;
4635 
4636 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4637 	ut_init_trid(&path.trid);
4638 
4639 	set_thread(0);
4640 
4641 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4642 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4643 
4644 	g_ut_attach_ctrlr_status = 0;
4645 	g_ut_attach_bdev_count = 1;
4646 
4647 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4648 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4649 	CU_ASSERT(rc == 0);
4650 
4651 	spdk_delay_us(1000);
4652 	poll_threads();
4653 
4654 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4655 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4656 
4657 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4658 	CU_ASSERT(nvme_ctrlr != NULL);
4659 
4660 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4661 	CU_ASSERT(bdev != NULL);
4662 
4663 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4664 	CU_ASSERT(nvme_ns != NULL);
4665 
4666 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4667 	ut_bdev_io_set_buf(bdev_io);
4668 
4669 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4670 
4671 	ch = spdk_get_io_channel(bdev);
4672 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4673 
4674 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4675 
4676 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4677 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4678 
4679 	nvme_qpair = io_path->qpair;
4680 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4681 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4682 
4683 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4684 
4685 	/* If I/O is aborted by request, it should not be retried. */
4686 	g_opts.bdev_retry_count = 1;
4687 
4688 	bdev_io->internal.in_submit_request = true;
4689 
4690 	bdev_nvme_submit_request(ch, bdev_io);
4691 
4692 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4693 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4694 
4695 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4696 	SPDK_CU_ASSERT_FATAL(req != NULL);
4697 
4698 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4699 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4700 
4701 	poll_thread_times(0, 1);
4702 
4703 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4704 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4705 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4706 
4707 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4708 	 * the failed I/O should not be retried.
4709 	 */
4710 	g_opts.bdev_retry_count = 4;
4711 
4712 	bdev_io->internal.in_submit_request = true;
4713 
4714 	bdev_nvme_submit_request(ch, bdev_io);
4715 
4716 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4717 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4718 
4719 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4720 	SPDK_CU_ASSERT_FATAL(req != NULL);
4721 
4722 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4723 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4724 	bio->retry_count = 4;
4725 
4726 	poll_thread_times(0, 1);
4727 
4728 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4729 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4730 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4731 
4732 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
4733 	g_opts.bdev_retry_count = -1;
4734 
4735 	bdev_io->internal.in_submit_request = true;
4736 
4737 	bdev_nvme_submit_request(ch, bdev_io);
4738 
4739 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4740 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4741 
4742 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4743 	SPDK_CU_ASSERT_FATAL(req != NULL);
4744 
4745 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4746 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4747 	bio->retry_count = 4;
4748 
4749 	poll_thread_times(0, 1);
4750 
4751 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4752 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4753 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4754 
4755 	poll_threads();
4756 
4757 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4758 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4759 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4760 
4761 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4762 	 * the failed I/O should be retried.
4763 	 */
4764 	g_opts.bdev_retry_count = 4;
4765 
4766 	bdev_io->internal.in_submit_request = true;
4767 
4768 	bdev_nvme_submit_request(ch, bdev_io);
4769 
4770 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4771 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4772 
4773 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4774 	SPDK_CU_ASSERT_FATAL(req != NULL);
4775 
4776 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4777 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4778 	bio->retry_count = 3;
4779 
4780 	poll_thread_times(0, 1);
4781 
4782 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4783 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4784 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4785 
4786 	poll_threads();
4787 
4788 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4789 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4790 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4791 
4792 	free(bdev_io);
4793 
4794 	spdk_put_io_channel(ch);
4795 
4796 	poll_threads();
4797 
4798 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4799 	CU_ASSERT(rc == 0);
4800 
4801 	poll_threads();
4802 	spdk_delay_us(1000);
4803 	poll_threads();
4804 
4805 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4806 
4807 	g_opts.bdev_retry_count = 0;
4808 }
4809 
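/* Verify that at most one ANA log page read is in flight per ctrlr: while
 * ana_log_page_updating is set, further reads are rejected from any thread.
 * A reset may still start during a read, but reads are rejected while a
 * reset is in progress.
 */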
4810 static void
4811 test_concurrent_read_ana_log_page(void)
4812 {
4813 	struct spdk_nvme_transport_id trid = {};
4814 	struct spdk_nvme_ctrlr *ctrlr;
4815 	struct nvme_ctrlr *nvme_ctrlr;
4816 	const int STRING_SIZE = 32;
4817 	const char *attached_names[STRING_SIZE];
4818 	int rc;
4819 
4820 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4821 	ut_init_trid(&trid);
4822 
4823 	set_thread(0);
4824 
4825 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4826 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4827 
4828 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4829 
4830 	g_ut_attach_ctrlr_status = 0;
4831 	g_ut_attach_bdev_count = 1;
4832 
4833 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
4834 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4835 	CU_ASSERT(rc == 0);
4836 
4837 	spdk_delay_us(1000);
4838 	poll_threads();
4839 
4840 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4841 	poll_threads();
4842 
4843 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4844 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4845 
4846 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4847 
4848 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4849 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4850 
4851 	/* The following read request should be rejected. */
4852 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4853 
4854 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4855 
4856 	set_thread(1);
4857 
4858 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4859 
4860 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4861 
4862 	/* A reset request issued while reading the ANA log page should not be rejected. */
4863 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
4864 	CU_ASSERT(rc == 0);
4865 
4866 	poll_threads();
4867 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4868 	poll_threads();
4869 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4870 	poll_threads();
4871 
4872 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4873 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4874 
4875 	/* Reading the ANA log page while resetting the ctrlr should be rejected. */
4876 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
4877 	CU_ASSERT(rc == 0);
4878 
4879 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4880 
4881 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4882 
4883 	poll_threads();
4884 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4885 	poll_threads();
4886 
4887 	set_thread(0);
4888 
4889 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4890 	CU_ASSERT(rc == 0);
4891 
4892 	poll_threads();
4893 	spdk_delay_us(1000);
4894 	poll_threads();
4895 
4896 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4897 }
4898 
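/* Verify that an ANA error queues the failed I/O, freezes the namespace,
 * and triggers an ANA log page update; once the update restores an
 * accessible ANA state, the queued I/O is retried and succeeds.
 */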
4899 static void
4900 test_retry_io_for_ana_error(void)
4901 {
4902 	struct nvme_path_id path = {};
4903 	struct spdk_nvme_ctrlr *ctrlr;
4904 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4905 	struct nvme_ctrlr *nvme_ctrlr;
4906 	const int STRING_SIZE = 32;
4907 	const char *attached_names[STRING_SIZE];
4908 	struct nvme_bdev *bdev;
4909 	struct nvme_ns *nvme_ns;
4910 	struct spdk_bdev_io *bdev_io;
4911 	struct nvme_bdev_io *bio;
4912 	struct spdk_io_channel *ch;
4913 	struct nvme_bdev_channel *nbdev_ch;
4914 	struct nvme_io_path *io_path;
4915 	struct nvme_qpair *nvme_qpair;
4916 	struct ut_nvme_req *req;
4917 	uint64_t now;
4918 	int rc;
4919 
4920 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4921 	ut_init_trid(&path.trid);
4922 
4923 	g_opts.bdev_retry_count = 1;
4924 
4925 	set_thread(0);
4926 
4927 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
4928 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4929 
4930 	g_ut_attach_ctrlr_status = 0;
4931 	g_ut_attach_bdev_count = 1;
4932 
4933 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4934 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4935 	CU_ASSERT(rc == 0);
4936 
4937 	spdk_delay_us(1000);
4938 	poll_threads();
4939 
4940 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4941 	poll_threads();
4942 
4943 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4944 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4945 
4946 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4947 	CU_ASSERT(nvme_ctrlr != NULL);
4948 
4949 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4950 	CU_ASSERT(bdev != NULL);
4951 
4952 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4953 	CU_ASSERT(nvme_ns != NULL);
4954 
4955 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4956 	ut_bdev_io_set_buf(bdev_io);
4957 
4958 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4959 
4960 	ch = spdk_get_io_channel(bdev);
4961 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4962 
4963 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4964 
4965 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4966 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4967 
4968 	nvme_qpair = io_path->qpair;
4969 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4970 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4971 
4972 	now = spdk_get_ticks();
4973 
4974 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4975 
4976 	/* If an I/O gets an ANA error, it should be queued, the corresponding
4977 	 * namespace should be frozen, and its ANA state should be updated.
4978 	 */
4979 	bdev_io->internal.in_submit_request = true;
4980 
4981 	bdev_nvme_submit_request(ch, bdev_io);
4982 
4983 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4984 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4985 
4986 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4987 	SPDK_CU_ASSERT_FATAL(req != NULL);
4988 
4989 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4990 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
4991 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4992 
4993 	poll_thread_times(0, 1);
4994 
4995 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4996 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4997 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4998 	/* I/O should be retried immediately. */
4999 	CU_ASSERT(bio->retry_ticks == now);
5000 	CU_ASSERT(nvme_ns->ana_state_updating == true);
5001 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
5002 
5003 	poll_threads();
5004 
5005 	/* Namespace is inaccessible, and hence I/O should be queued again. */
5006 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5007 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5008 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5009 	/* The I/O should be retried after a second if no I/O path was found but
5010 	 * one may become available.
5011 	 */
5012 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
5013 
5014 	/* The namespace should be unfrozen after its ANA state update completes. */
5015 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5016 	poll_threads();
5017 
5018 	CU_ASSERT(nvme_ns->ana_state_updating == false);
5019 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5020 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
5021 
5022 	/* Retrying the queued I/O should succeed. */
5023 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
5024 	poll_threads();
5025 
5026 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5027 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5028 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5029 
5030 	free(bdev_io);
5031 
5032 	spdk_put_io_channel(ch);
5033 
5034 	poll_threads();
5035 
5036 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5037 	CU_ASSERT(rc == 0);
5038 
5039 	poll_threads();
5040 	spdk_delay_us(1000);
5041 	poll_threads();
5042 
5043 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5044 
5045 	g_opts.bdev_retry_count = 0;
5046 }
5047 
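/* Verify validation of the I/O error resiliency triple
 * (ctrlr_loss_timeout_sec, reconnect_delay_sec, fast_io_fail_timeout_sec).
 * The rules, as inferred from the cases below: ctrlr_loss_timeout_sec must be
 * >= -1; reconnect_delay_sec must be zero iff ctrlr_loss_timeout_sec is zero,
 * and must not exceed a positive ctrlr_loss_timeout_sec; and
 * fast_io_fail_timeout_sec, when nonzero, must lie between
 * reconnect_delay_sec and ctrlr_loss_timeout_sec (with -1 meaning unbounded).
 */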
5048 static void
5049 test_check_io_error_resiliency_params(void)
5050 {
5051 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5052 	 * 3rd parameter is fast_io_fail_timeout_sec.
5053 	 */
5054 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
5055 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
5056 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
5057 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
5058 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
5059 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
5060 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
5061 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
5062 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
5063 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
5064 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
5065 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
5066 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
5067 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
5068 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
5069 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
5070 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
5071 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5072 	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
5073 }
5074 
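/* Verify that I/Os submitted while the ctrlr is reconnecting are queued in
 * submission order, and that after the reset completes they are resubmitted
 * one by one as their retry deadlines (about one second after submission in
 * this test) expire.
 */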
5075 static void
5076 test_retry_io_if_ctrlr_is_resetting(void)
5077 {
5078 	struct nvme_path_id path = {};
5079 	struct nvme_ctrlr_opts opts = {};
5080 	struct spdk_nvme_ctrlr *ctrlr;
5081 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5082 	struct nvme_ctrlr *nvme_ctrlr;
5083 	const int STRING_SIZE = 32;
5084 	const char *attached_names[STRING_SIZE];
5085 	struct nvme_bdev *bdev;
5086 	struct nvme_ns *nvme_ns;
5087 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5088 	struct spdk_io_channel *ch;
5089 	struct nvme_bdev_channel *nbdev_ch;
5090 	struct nvme_io_path *io_path;
5091 	struct nvme_qpair *nvme_qpair;
5092 	int rc;
5093 
5094 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5095 	ut_init_trid(&path.trid);
5096 
5097 	set_thread(0);
5098 
5099 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5100 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5101 
5102 	g_ut_attach_ctrlr_status = 0;
5103 	g_ut_attach_bdev_count = 1;
5104 
5105 	opts.ctrlr_loss_timeout_sec = -1;
5106 	opts.reconnect_delay_sec = 1;
5107 
5108 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5109 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5110 	CU_ASSERT(rc == 0);
5111 
5112 	spdk_delay_us(1000);
5113 	poll_threads();
5114 
5115 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5116 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5117 
5118 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5119 	CU_ASSERT(nvme_ctrlr != NULL);
5120 
5121 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5122 	CU_ASSERT(bdev != NULL);
5123 
5124 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5125 	CU_ASSERT(nvme_ns != NULL);
5126 
5127 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5128 	ut_bdev_io_set_buf(bdev_io1);
5129 
5130 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5131 	ut_bdev_io_set_buf(bdev_io2);
5132 
5133 	ch = spdk_get_io_channel(bdev);
5134 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5135 
5136 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5137 
5138 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5139 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5140 
5141 	nvme_qpair = io_path->qpair;
5142 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5143 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5144 
5145 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5146 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5147 
5148 	/* If qpair is connected, I/O should succeed. */
5149 	bdev_io1->internal.in_submit_request = true;
5150 
5151 	bdev_nvme_submit_request(ch, bdev_io1);
5152 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5153 
5154 	poll_threads();
5155 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5157 
5158 	/* If qpair is disconnected, it is freed and then reconnected via resetting
5159 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
5160 	 * while resetting the nvme_ctrlr.
5161 	 */
5162 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5163 	ctrlr->is_failed = true;
5164 
5165 	poll_thread_times(0, 5);
5166 
5167 	CU_ASSERT(nvme_qpair->qpair == NULL);
5168 	CU_ASSERT(nvme_ctrlr->resetting == true);
5169 	CU_ASSERT(ctrlr->is_failed == false);
5170 
5171 	bdev_io1->internal.in_submit_request = true;
5172 
5173 	bdev_nvme_submit_request(ch, bdev_io1);
5174 
5175 	spdk_delay_us(1);
5176 
5177 	bdev_io2->internal.in_submit_request = true;
5178 
5179 	bdev_nvme_submit_request(ch, bdev_io2);
5180 
5181 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5182 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5183 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5184 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
5185 
5186 	poll_threads();
5187 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5188 	poll_threads();
5189 
5190 	CU_ASSERT(nvme_qpair->qpair != NULL);
5191 	CU_ASSERT(nvme_ctrlr->resetting == false);
5192 
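	/* Each queued I/O appears to be retried one second after it was queued.
	 * bdev_io1 was queued 1 us before bdev_io2, so advancing the clock to just
	 * under one second after bdev_io2 was queued should resubmit only bdev_io1.
	 */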
5193 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5194 
5195 	poll_thread_times(0, 1);
5196 
5197 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5198 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5199 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5200 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5201 
5202 	poll_threads();
5203 
5204 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5205 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5206 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5207 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5208 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5209 
5210 	spdk_delay_us(1);
5211 
5212 	poll_thread_times(0, 1);
5213 
5214 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5215 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5216 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5217 
5218 	poll_threads();
5219 
5220 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5221 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
5222 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5223 
5224 	free(bdev_io1);
5225 	free(bdev_io2);
5226 
5227 	spdk_put_io_channel(ch);
5228 
5229 	poll_threads();
5230 
5231 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5232 	CU_ASSERT(rc == 0);
5233 
5234 	poll_threads();
5235 	spdk_delay_us(1000);
5236 	poll_threads();
5237 
5238 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5239 }
5240 
5241 static void
5242 test_reconnect_ctrlr(void)
5243 {
5244 	struct spdk_nvme_transport_id trid = {};
5245 	struct spdk_nvme_ctrlr ctrlr = {};
5246 	struct nvme_ctrlr *nvme_ctrlr;
5247 	struct spdk_io_channel *ch1, *ch2;
5248 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5249 	int rc;
5250 
5251 	ut_init_trid(&trid);
5252 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5253 
5254 	set_thread(0);
5255 
5256 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5257 	CU_ASSERT(rc == 0);
5258 
5259 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5260 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5261 
5262 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5263 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
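	/* With these opts, a reconnect is attempted every second, and the ctrlr is
	 * given up and deleted once ctrlr_loss_timeout_sec (2 seconds) has elapsed.
	 */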
5264 
5265 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5266 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5267 
5268 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5269 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5270 
5271 	set_thread(1);
5272 
5273 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5274 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5275 
5276 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5277 
5278 	/* Reset starts from thread 1. */
5279 	set_thread(1);
5280 
5281 	/* The reset should fail and a reconnect timer should be registered. */
5282 	ctrlr.fail_reset = true;
5283 	ctrlr.is_failed = true;
5284 
5285 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5286 	CU_ASSERT(rc == 0);
5287 	CU_ASSERT(nvme_ctrlr->resetting == true);
5288 	CU_ASSERT(ctrlr.is_failed == true);
5289 
5290 	poll_threads();
5291 
5292 	CU_ASSERT(nvme_ctrlr->resetting == false);
5293 	CU_ASSERT(ctrlr.is_failed == false);
5294 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5295 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5296 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5297 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5298 
	/* A new reset starts from thread 1. */
5300 	set_thread(1);
5301 
	/* The reset should cancel the reconnect timer and start reconnection immediately.
	 * Then, the reset should fail and a reconnect timer should be registered again.
	 */
5305 	ctrlr.fail_reset = true;
5306 	ctrlr.is_failed = true;
5307 
5308 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5309 	CU_ASSERT(rc == 0);
5310 	CU_ASSERT(nvme_ctrlr->resetting == true);
5311 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5312 	CU_ASSERT(ctrlr.is_failed == true);
5313 
5314 	poll_threads();
5315 
5316 	CU_ASSERT(nvme_ctrlr->resetting == false);
5317 	CU_ASSERT(ctrlr.is_failed == false);
5318 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5319 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5320 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5321 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5322 
	/* Then a reconnect retry should succeed. */
5324 	ctrlr.fail_reset = false;
5325 
5326 	spdk_delay_us(SPDK_SEC_TO_USEC);
5327 	poll_thread_times(0, 1);
5328 
5329 	CU_ASSERT(nvme_ctrlr->resetting == true);
5330 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5331 
5332 	poll_threads();
5333 
5334 	CU_ASSERT(nvme_ctrlr->resetting == false);
5335 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5336 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5337 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5338 
5339 	/* The reset should fail and a reconnect timer should be registered. */
5340 	ctrlr.fail_reset = true;
5341 	ctrlr.is_failed = true;
5342 
5343 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5344 	CU_ASSERT(rc == 0);
5345 	CU_ASSERT(nvme_ctrlr->resetting == true);
5346 	CU_ASSERT(ctrlr.is_failed == true);
5347 
5348 	poll_threads();
5349 
5350 	CU_ASSERT(nvme_ctrlr->resetting == false);
5351 	CU_ASSERT(ctrlr.is_failed == false);
5352 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5353 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5354 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5355 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5356 
5357 	/* Then a reconnect retry should still fail. */
5358 	spdk_delay_us(SPDK_SEC_TO_USEC);
5359 	poll_thread_times(0, 1);
5360 
5361 	CU_ASSERT(nvme_ctrlr->resetting == true);
5362 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5363 
5364 	poll_threads();
5365 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5366 	poll_threads();
5367 
5368 	CU_ASSERT(nvme_ctrlr->resetting == false);
5369 	CU_ASSERT(ctrlr.is_failed == false);
5370 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5371 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5372 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5373 
5374 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5375 	spdk_delay_us(SPDK_SEC_TO_USEC);
5376 	poll_threads();
5377 
5378 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5379 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5380 	CU_ASSERT(nvme_ctrlr->destruct == true);
5381 
5382 	spdk_put_io_channel(ch2);
5383 
5384 	set_thread(0);
5385 
5386 	spdk_put_io_channel(ch1);
5387 
5388 	poll_threads();
5389 	spdk_delay_us(1000);
5390 	poll_threads();
5391 
5392 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5393 }
5394 
5395 static struct nvme_path_id *
5396 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5397 		       const struct spdk_nvme_transport_id *trid)
5398 {
5399 	struct nvme_path_id *p;
5400 
5401 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5402 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5403 			break;
5404 		}
5405 	}
5406 
5407 	return p;
5408 }
5409 
5410 static void
5411 test_retry_failover_ctrlr(void)
5412 {
5413 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5414 	struct spdk_nvme_ctrlr ctrlr = {};
5415 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5416 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5417 	struct spdk_io_channel *ch;
5418 	struct nvme_ctrlr_channel *ctrlr_ch;
5419 	int rc;
5420 
5421 	ut_init_trid(&trid1);
5422 	ut_init_trid2(&trid2);
5423 	ut_init_trid3(&trid3);
5424 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5425 
5426 	set_thread(0);
5427 
5428 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5429 	CU_ASSERT(rc == 0);
5430 
5431 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5432 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5433 
5434 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5435 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5436 
5437 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5438 	CU_ASSERT(rc == 0);
5439 
5440 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5441 	CU_ASSERT(rc == 0);
5442 
5443 	ch = spdk_get_io_channel(nvme_ctrlr);
5444 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5445 
5446 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5447 
5448 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5449 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5450 	CU_ASSERT(path_id1->last_failed_tsc == 0);
5451 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5452 
5453 	/* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */
5454 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5455 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5456 
5457 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5458 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5459 
	/* It is expected that connecting to all of trid1, trid2, and trid3 fails,
	 * and a reconnect timer is started.
	 */
5462 	ctrlr.fail_reset = true;
5463 	ctrlr.is_failed = true;
5464 
5465 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5466 	CU_ASSERT(rc == 0);
5467 
5468 	poll_threads();
5469 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5470 	poll_threads();
5471 
5472 	CU_ASSERT(nvme_ctrlr->resetting == false);
5473 	CU_ASSERT(ctrlr.is_failed == false);
5474 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5475 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5476 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5477 	CU_ASSERT(path_id1->last_failed_tsc != 0);
5478 
5479 	CU_ASSERT(path_id2->last_failed_tsc != 0);
5480 	CU_ASSERT(path_id3->last_failed_tsc != 0);
5481 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5482 
	/* If trid1 is removed while a reconnect is scheduled, the active path_id is
	 * switched to trid2 but a reset is not started.
	 */
5486 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, true);
5487 	CU_ASSERT(rc == 0);
5488 
5489 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL);
5490 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5491 
5492 	CU_ASSERT(nvme_ctrlr->resetting == false);
5493 
5494 	/* If reconnect succeeds, trid2 should be the active path_id */
5495 	ctrlr.fail_reset = false;
5496 
5497 	spdk_delay_us(SPDK_SEC_TO_USEC);
5498 	poll_thread_times(0, 1);
5499 
5500 	CU_ASSERT(nvme_ctrlr->resetting == true);
5501 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5502 
5503 	poll_threads();
5504 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5505 	poll_threads();
5506 
5507 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL);
5508 	CU_ASSERT(path_id2->last_failed_tsc == 0);
5509 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5510 	CU_ASSERT(nvme_ctrlr->resetting == false);
5511 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5512 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5513 
5514 	spdk_put_io_channel(ch);
5515 
5516 	poll_threads();
5517 
5518 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5519 	CU_ASSERT(rc == 0);
5520 
5521 	poll_threads();
5522 	spdk_delay_us(1000);
5523 	poll_threads();
5524 
5525 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5526 }
5527 
5528 static void
5529 test_fail_path(void)
5530 {
5531 	struct nvme_path_id path = {};
5532 	struct nvme_ctrlr_opts opts = {};
5533 	struct spdk_nvme_ctrlr *ctrlr;
5534 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5535 	struct nvme_ctrlr *nvme_ctrlr;
5536 	const int STRING_SIZE = 32;
5537 	const char *attached_names[STRING_SIZE];
5538 	struct nvme_bdev *bdev;
5539 	struct nvme_ns *nvme_ns;
5540 	struct spdk_bdev_io *bdev_io;
5541 	struct spdk_io_channel *ch;
5542 	struct nvme_bdev_channel *nbdev_ch;
5543 	struct nvme_io_path *io_path;
5544 	struct nvme_ctrlr_channel *ctrlr_ch;
5545 	int rc;
5546 
	/* The test scenario is the following.
	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
	 *   expires first. The queued I/O is failed.
	 * - After fast_io_fail_timeout_sec expires, any I/O is failed immediately.
	 * - Then ctrlr_loss_timeout_sec expires and the ctrlr is deleted.
	 */
5556 
5557 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5558 	ut_init_trid(&path.trid);
5559 
5560 	set_thread(0);
5561 
5562 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5563 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5564 
5565 	g_ut_attach_ctrlr_status = 0;
5566 	g_ut_attach_bdev_count = 1;
5567 
5568 	opts.ctrlr_loss_timeout_sec = 4;
5569 	opts.reconnect_delay_sec = 1;
5570 	opts.fast_io_fail_timeout_sec = 2;
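	/* Expected timeline with these opts: a reconnect attempt every second,
	 * fast I/O failure after 2 seconds, and ctrlr deletion after 4 seconds.
	 */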
5571 
5572 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5573 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5574 	CU_ASSERT(rc == 0);
5575 
5576 	spdk_delay_us(1000);
5577 	poll_threads();
5578 
5579 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5580 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5581 
5582 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5583 	CU_ASSERT(nvme_ctrlr != NULL);
5584 
5585 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5586 	CU_ASSERT(bdev != NULL);
5587 
5588 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5589 	CU_ASSERT(nvme_ns != NULL);
5590 
5591 	ch = spdk_get_io_channel(bdev);
5592 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5593 
5594 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5595 
5596 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5597 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5598 
5599 	ctrlr_ch = io_path->qpair->ctrlr_ch;
5600 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5601 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
5602 
5603 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
	ut_bdev_io_set_buf(bdev_io);

5607 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
5608 	ctrlr->fail_reset = true;
5609 	ctrlr->is_failed = true;
5610 
5611 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
5612 	CU_ASSERT(rc == 0);
5613 	CU_ASSERT(nvme_ctrlr->resetting == true);
5614 	CU_ASSERT(ctrlr->is_failed == true);
5615 
5616 	poll_threads();
5617 
5618 	CU_ASSERT(nvme_ctrlr->resetting == false);
5619 	CU_ASSERT(ctrlr->is_failed == false);
5620 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5621 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5622 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
5623 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5624 
5625 	/* I/O should be queued. */
5626 	bdev_io->internal.in_submit_request = true;
5627 
5628 	bdev_nvme_submit_request(ch, bdev_io);
5629 
5630 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5631 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5632 
	/* After one second, the I/O should still be queued and the ctrlr should
	 * still be recovering.
	 */
5636 	spdk_delay_us(SPDK_SEC_TO_USEC);
5637 	poll_threads();
5638 
5639 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5640 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5641 
5642 	CU_ASSERT(nvme_ctrlr->resetting == false);
5643 	CU_ASSERT(ctrlr->is_failed == false);
5644 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5645 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5646 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5647 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5650 
	/* After two seconds, fast_io_fail_timeout_sec should expire. */
5652 	spdk_delay_us(SPDK_SEC_TO_USEC);
5653 	poll_threads();
5654 
5655 	CU_ASSERT(nvme_ctrlr->resetting == false);
5656 	CU_ASSERT(ctrlr->is_failed == false);
5657 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5658 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5659 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5660 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
5661 
5662 	/* Then within a second, pending I/O should be failed. */
5663 	spdk_delay_us(SPDK_SEC_TO_USEC);
5664 	poll_threads();
5665 
5666 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5667 	poll_threads();
5668 
5669 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5670 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5671 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5672 
5673 	/* Another I/O submission should be failed immediately. */
5674 	bdev_io->internal.in_submit_request = true;
5675 
5676 	bdev_nvme_submit_request(ch, bdev_io);
5677 
5678 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5679 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5680 
	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr
	 * should be deleted.
	 */
5684 	spdk_delay_us(SPDK_SEC_TO_USEC);
5685 	poll_threads();
5686 
5687 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5688 	poll_threads();
5689 
5690 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5691 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5692 	CU_ASSERT(nvme_ctrlr->destruct == true);
5693 
5694 	spdk_put_io_channel(ch);
5695 
5696 	poll_threads();
5697 	spdk_delay_us(1000);
5698 	poll_threads();
5699 
5700 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5701 
5702 	free(bdev_io);
5703 }
5704 
5705 static void
5706 test_nvme_ns_cmp(void)
5707 {
5708 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
5709 
5710 	nvme_ns1.id = 0;
5711 	nvme_ns2.id = UINT32_MAX;
5712 
5713 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
5714 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
5715 }
5716 
5717 static void
5718 test_ana_transition(void)
5719 {
5720 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
5721 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
5722 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
5723 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
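	/* cdata.anatt is in seconds, so the ANATT timer registered below runs for
	 * 10 seconds before the ANA transition is regarded as timed out.
	 */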
5724 
	/* case 1: the ana_transition_timedout flag is cleared. */
5726 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5727 	nvme_ns.ana_transition_timedout = true;
5728 
5729 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5730 
5731 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5732 
5733 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
5734 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5735 
5736 	/* case 2: ANATT timer is kept. */
5737 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5738 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
5739 			      &nvme_ns,
5740 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5741 
5742 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5743 
5744 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5745 
5746 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5747 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
5748 
5749 	/* case 3: ANATT timer is stopped. */
5750 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5751 
5752 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5753 
5754 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5755 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5756 
	/* case 4: ANATT timer is started. */
5758 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5759 
5760 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5761 
5762 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5763 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
5764 
	/* case 5: ANATT timer is expired. */
5766 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5767 
5768 	poll_threads();
5769 
5770 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5771 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
5772 }
5773 
5774 static void
5775 _set_preferred_path_cb(void *cb_arg, int rc)
5776 {
5777 	bool *done = cb_arg;
5778 
5779 	*done = true;
5780 }
5781 
5782 static void
5783 test_set_preferred_path(void)
5784 {
5785 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
5786 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
5787 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5788 	const int STRING_SIZE = 32;
5789 	const char *attached_names[STRING_SIZE];
5790 	struct nvme_bdev *bdev;
5791 	struct spdk_io_channel *ch;
5792 	struct nvme_bdev_channel *nbdev_ch;
5793 	struct nvme_io_path *io_path;
5794 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
5795 	const struct spdk_nvme_ctrlr_data *cdata;
5796 	bool done;
5797 	int rc;
5798 
5799 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5800 	ut_init_trid(&path1.trid);
5801 	ut_init_trid2(&path2.trid);
5802 	ut_init_trid3(&path3.trid);
5803 	g_ut_attach_ctrlr_status = 0;
5804 	g_ut_attach_bdev_count = 1;
5805 
5806 	set_thread(0);
5807 
5808 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
5809 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
5810 
5811 	ctrlr1->ns[0].uuid = &uuid1;
5812 
5813 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
5814 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5815 	CU_ASSERT(rc == 0);
5816 
5817 	spdk_delay_us(1000);
5818 	poll_threads();
5819 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5820 	poll_threads();
5821 
5822 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
5823 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
5824 
5825 	ctrlr2->ns[0].uuid = &uuid1;
5826 
5827 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
5828 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5829 	CU_ASSERT(rc == 0);
5830 
5831 	spdk_delay_us(1000);
5832 	poll_threads();
5833 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5834 	poll_threads();
5835 
5836 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
5837 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
5838 
5839 	ctrlr3->ns[0].uuid = &uuid1;
5840 
5841 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
5842 			      attach_ctrlr_done, NULL, NULL, NULL, true);
5843 	CU_ASSERT(rc == 0);
5844 
5845 	spdk_delay_us(1000);
5846 	poll_threads();
5847 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5848 	poll_threads();
5849 
5850 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5851 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5852 
5853 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5854 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
5855 
5856 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
5857 
5858 	ch = spdk_get_io_channel(bdev);
5859 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5860 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5861 
5862 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5863 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5864 
5865 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
5866 
5867 	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
5868 	 * should return io_path to ctrlr2.
5869 	 */
5870 
5871 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
5872 	done = false;
5873 
5874 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5875 
5876 	poll_threads();
5877 	CU_ASSERT(done == true);
5878 
5879 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5880 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5881 
5882 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
5883 
5884 	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
5885 	 * acquired, find_io_path() should return io_path to ctrlr3.
5886 	 */
5887 
5888 	spdk_put_io_channel(ch);
5889 
5890 	poll_threads();
5891 
5892 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
5893 	done = false;
5894 
5895 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5896 
5897 	poll_threads();
5898 	CU_ASSERT(done == true);
5899 
5900 	ch = spdk_get_io_channel(bdev);
5901 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5902 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5903 
5904 	io_path = bdev_nvme_find_io_path(nbdev_ch);
5905 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5906 
5907 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
5908 
5909 	spdk_put_io_channel(ch);
5910 
5911 	poll_threads();
5912 
5913 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5914 	CU_ASSERT(rc == 0);
5915 
5916 	poll_threads();
5917 	spdk_delay_us(1000);
5918 	poll_threads();
5919 
5920 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5921 }
5922 
5923 static void
5924 test_find_next_io_path(void)
5925 {
5926 	struct nvme_bdev_channel nbdev_ch = {
5927 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
5928 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
5929 		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
5930 	};
5931 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
5932 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
5933 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
5934 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
5935 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
5936 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
5937 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
5938 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
5939 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
5940 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
5941 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
5942 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
5943 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
5944 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
5945 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
5946 
5947 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
5948 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
5949 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
5950 
	/* Test the case where nbdev_ch->current_io_path is set. The case where
	 * current_io_path == NULL is covered in test_find_io_path.
	 */
5954 
5955 	nbdev_ch.current_io_path = &io_path2;
5956 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5957 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5958 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5959 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5960 
5961 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5962 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5963 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5964 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5965 
5966 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5967 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5968 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5969 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5970 
5971 	nbdev_ch.current_io_path = &io_path3;
5972 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5973 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5974 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5975 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5976 
5977 	/* Test if next io_path is selected according to rr_min_io */
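	/* rr_min_io = 2 means each io_path services two consecutive I/Os before
	 * the round-robin selector advances to the next path, as the assertions
	 * below illustrate.
	 */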
5978 
5979 	nbdev_ch.current_io_path = NULL;
5980 	nbdev_ch.rr_min_io = 2;
5981 	nbdev_ch.rr_counter = 0;
5982 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5983 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5984 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5985 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5986 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5987 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5988 
5989 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5990 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5991 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5992 }
5993 
5994 static void
5995 test_find_io_path_min_qd(void)
5996 {
5997 	struct nvme_bdev_channel nbdev_ch = {
5998 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
5999 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6000 		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
6001 	};
6002 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6003 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6004 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6005 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6006 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6007 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6008 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6009 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6010 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6011 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6012 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6013 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
6014 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6015 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6016 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6017 
6018 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6019 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6020 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6021 
	/* Test that the minimum io_outstanding and the ANA optimized state are
	 * prioritized when using the least queue depth selector.
	 */
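	/* In the first case below, io_path3 has the fewest outstanding requests but
	 * is non-optimized, so the optimized io_path2 should be selected instead.
	 */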
6025 	qpair1.num_outstanding_reqs = 2;
6026 	qpair2.num_outstanding_reqs = 1;
6027 	qpair3.num_outstanding_reqs = 0;
6028 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6029 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6030 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6031 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6032 
6033 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6034 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6035 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6036 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6037 
6038 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6039 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6040 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6041 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6042 
6043 	qpair2.num_outstanding_reqs = 4;
6044 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6045 }
6046 
6047 static void
6048 test_disable_auto_failback(void)
6049 {
6050 	struct nvme_path_id path1 = {}, path2 = {};
6051 	struct nvme_ctrlr_opts opts = {};
6052 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6053 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6054 	struct nvme_ctrlr *nvme_ctrlr1;
6055 	const int STRING_SIZE = 32;
6056 	const char *attached_names[STRING_SIZE];
6057 	struct nvme_bdev *bdev;
6058 	struct spdk_io_channel *ch;
6059 	struct nvme_bdev_channel *nbdev_ch;
6060 	struct nvme_io_path *io_path;
6061 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6062 	const struct spdk_nvme_ctrlr_data *cdata;
6063 	bool done;
6064 	int rc;
6065 
6066 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6067 	ut_init_trid(&path1.trid);
6068 	ut_init_trid2(&path2.trid);
6069 	g_ut_attach_ctrlr_status = 0;
6070 	g_ut_attach_bdev_count = 1;
6071 
6072 	g_opts.disable_auto_failback = true;
6073 
6074 	opts.ctrlr_loss_timeout_sec = -1;
6075 	opts.reconnect_delay_sec = 1;
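	/* ctrlr_loss_timeout_sec = -1 keeps reconnecting indefinitely, with one
	 * reconnect attempt per second.
	 */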
6076 
6077 	set_thread(0);
6078 
6079 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6080 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6081 
6082 	ctrlr1->ns[0].uuid = &uuid1;
6083 
6084 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6085 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6086 	CU_ASSERT(rc == 0);
6087 
6088 	spdk_delay_us(1000);
6089 	poll_threads();
6090 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6091 	poll_threads();
6092 
6093 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6094 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6095 
6096 	ctrlr2->ns[0].uuid = &uuid1;
6097 
6098 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6099 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6100 	CU_ASSERT(rc == 0);
6101 
6102 	spdk_delay_us(1000);
6103 	poll_threads();
6104 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6105 	poll_threads();
6106 
6107 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6108 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6109 
6110 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6111 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6112 
6113 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
6114 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6115 
6116 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6117 
6118 	ch = spdk_get_io_channel(bdev);
6119 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6120 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6121 
6122 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6123 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6124 
6125 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6126 
6127 	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
6128 	ctrlr1->fail_reset = true;
6129 	ctrlr1->is_failed = true;
6130 
6131 	bdev_nvme_reset_ctrlr(nvme_ctrlr1);
6132 
6133 	poll_threads();
6134 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6135 	poll_threads();
6136 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6137 	poll_threads();
6138 
6139 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6140 
6141 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6142 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6143 
6144 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6145 
6146 	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
6147 	 * Hence, io_path to ctrlr2 should still be used.
6148 	 */
6149 	ctrlr1->fail_reset = false;
6150 
6151 	spdk_delay_us(SPDK_SEC_TO_USEC);
6152 	poll_threads();
6153 
6154 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6155 
6156 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6157 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6158 
6159 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6160 
	/* Explicitly set the io_path to ctrlr1 as preferred. Then the io_path to
	 * ctrlr1 should be used again.
	 */
6164 
6165 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6166 	done = false;
6167 
6168 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6169 
6170 	poll_threads();
6171 	CU_ASSERT(done == true);
6172 
6173 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6174 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6175 
6176 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6177 
6178 	spdk_put_io_channel(ch);
6179 
6180 	poll_threads();
6181 
6182 	rc = bdev_nvme_delete("nvme0", &g_any_path);
6183 	CU_ASSERT(rc == 0);
6184 
6185 	poll_threads();
6186 	spdk_delay_us(1000);
6187 	poll_threads();
6188 
6189 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6190 
6191 	g_opts.disable_auto_failback = false;
6192 }
6193 
6194 static void
6195 ut_set_multipath_policy_done(void *cb_arg, int rc)
6196 {
6197 	int *done = cb_arg;
6198 
6199 	SPDK_CU_ASSERT_FATAL(done != NULL);
6200 	*done = rc;
6201 }
6202 
6203 static void
6204 test_set_multipath_policy(void)
6205 {
6206 	struct nvme_path_id path1 = {}, path2 = {};
6207 	struct nvme_ctrlr_opts opts = {};
6208 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6209 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6210 	const int STRING_SIZE = 32;
6211 	const char *attached_names[STRING_SIZE];
6212 	struct nvme_bdev *bdev;
6213 	struct spdk_io_channel *ch;
6214 	struct nvme_bdev_channel *nbdev_ch;
6215 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6216 	int done;
6217 	int rc;
6218 
6219 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6220 	ut_init_trid(&path1.trid);
6221 	ut_init_trid2(&path2.trid);
6222 	g_ut_attach_ctrlr_status = 0;
6223 	g_ut_attach_bdev_count = 1;
6224 
6225 	g_opts.disable_auto_failback = true;
6226 
6227 	opts.ctrlr_loss_timeout_sec = -1;
6228 	opts.reconnect_delay_sec = 1;
6229 
6230 	set_thread(0);
6231 
6232 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6233 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6234 
6235 	ctrlr1->ns[0].uuid = &uuid1;
6236 
6237 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6238 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6239 	CU_ASSERT(rc == 0);
6240 
6241 	spdk_delay_us(1000);
6242 	poll_threads();
6243 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6244 	poll_threads();
6245 
6246 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6247 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6248 
6249 	ctrlr2->ns[0].uuid = &uuid1;
6250 
6251 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6252 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6253 	CU_ASSERT(rc == 0);
6254 
6255 	spdk_delay_us(1000);
6256 	poll_threads();
6257 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6258 	poll_threads();
6259 
6260 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6261 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6262 
6263 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6264 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6265 
	/* If the multipath policy is updated before getting any I/O channel,
	 * a new I/O channel should have the update.
	 */
6269 	done = -1;
6270 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6271 				       BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX,
6272 				       ut_set_multipath_policy_done, &done);
6273 	poll_threads();
6274 	CU_ASSERT(done == 0);
6275 
6276 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6277 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6278 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6279 
6280 	ch = spdk_get_io_channel(bdev);
6281 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6282 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6283 
6284 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6285 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH);
6286 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6287 
	/* If the multipath policy is updated while an I/O channel is active,
	 * the update should be applied to the I/O channel immediately.
	 */
6291 	done = -1;
6292 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6293 				       BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX,
6294 				       ut_set_multipath_policy_done, &done);
6295 	poll_threads();
6296 	CU_ASSERT(done == 0);
6297 
6298 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6299 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6300 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6301 	CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6302 	CU_ASSERT(bdev->rr_min_io == UINT32_MAX);
6303 	CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX);
6304 
6305 	spdk_put_io_channel(ch);
6306 
6307 	poll_threads();
6308 
6309 	rc = bdev_nvme_delete("nvme0", &g_any_path);
6310 	CU_ASSERT(rc == 0);
6311 
6312 	poll_threads();
6313 	spdk_delay_us(1000);
6314 	poll_threads();
6315 
6316 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6317 }
6318 
6319 static void
6320 test_uuid_generation(void)
6321 {
6322 	uint32_t nsid1 = 1, nsid2 = 2;
6323 	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
6324 	char sn3[21] = "                    ";
6325 	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
6326 	struct spdk_uuid uuid1, uuid2;
6327 
	/* Test case 1:
	 * Serial numbers are the same, nsids are different.
	 * Compare the two generated UUIDs - they should be different. */
6331 	uuid1 = nvme_generate_uuid(sn1, nsid1);
6332 	uuid2 = nvme_generate_uuid(sn1, nsid2);
6333 
6334 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6335 
	/* Test case 2:
	 * Serial numbers differ only by one character, nsids are the same.
	 * Compare the two generated UUIDs - they should be different. */
6339 	uuid1 = nvme_generate_uuid(sn1, nsid1);
6340 	uuid2 = nvme_generate_uuid(sn2, nsid1);
6341 
6342 	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);
6343 
	/* Test case 3:
	 * Serial number consists only of space characters.
	 * Validate the generated UUID. */
6347 	uuid1 = nvme_generate_uuid(sn3, nsid1);
6348 	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
6349 }
6350 
6351 static void
6352 test_retry_io_to_same_path(void)
6353 {
6354 	struct nvme_path_id path1 = {}, path2 = {};
6355 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6356 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6357 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
6358 	const int STRING_SIZE = 32;
6359 	const char *attached_names[STRING_SIZE];
6360 	struct nvme_bdev *bdev;
6361 	struct spdk_bdev_io *bdev_io;
6362 	struct nvme_bdev_io *bio;
6363 	struct spdk_io_channel *ch;
6364 	struct nvme_bdev_channel *nbdev_ch;
6365 	struct nvme_io_path *io_path1, *io_path2;
6366 	struct ut_nvme_req *req;
6367 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6368 	int done;
6369 	int rc;
6370 
6371 	g_opts.nvme_ioq_poll_period_us = 1;
6372 
6373 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6374 	ut_init_trid(&path1.trid);
6375 	ut_init_trid2(&path2.trid);
6376 	g_ut_attach_ctrlr_status = 0;
6377 	g_ut_attach_bdev_count = 1;
6378 
6379 	set_thread(0);
6380 
6381 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6382 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6383 
6384 	ctrlr1->ns[0].uuid = &uuid1;
6385 
6386 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6387 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6388 	CU_ASSERT(rc == 0);
6389 
6390 	spdk_delay_us(1000);
6391 	poll_threads();
6392 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6393 	poll_threads();
6394 
6395 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6396 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6397 
6398 	ctrlr2->ns[0].uuid = &uuid1;
6399 
6400 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6401 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6402 	CU_ASSERT(rc == 0);
6403 
6404 	spdk_delay_us(1000);
6405 	poll_threads();
6406 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6407 	poll_threads();
6408 
6409 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6410 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6411 
6412 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
6413 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6414 
6415 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
6416 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
6417 
6418 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6419 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6420 
6421 	done = -1;
6422 	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6423 				       BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
6424 	poll_threads();
6425 	CU_ASSERT(done == 0);
6426 
6427 	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6428 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6429 	CU_ASSERT(bdev->rr_min_io == 1);
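	/* rr_min_io = 1 makes the round-robin selector advance after every I/O, so
	 * consecutive submissions should alternate between the two paths.
	 */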
6430 
6431 	ch = spdk_get_io_channel(bdev);
6432 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6433 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6434 
6435 	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6436 	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
6437 	CU_ASSERT(nbdev_ch->rr_min_io == 1);
6438 
6439 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
6440 	ut_bdev_io_set_buf(bdev_io);
6441 
6442 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
6443 
6444 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
6445 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
6446 
6447 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
6448 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
6449 
6450 	/* The 1st I/O should be submitted to io_path1. */
6451 	bdev_io->internal.in_submit_request = true;
6452 
6453 	bdev_nvme_submit_request(ch, bdev_io);
6454 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6455 	CU_ASSERT(bio->io_path == io_path1);
6456 	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);
6457 
6458 	spdk_delay_us(1);
6459 
6460 	poll_threads();
6461 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6463 
6464 	/* The 2nd I/O should be submitted to io_path2 because the path selection
6465 	 * policy is round-robin.
6466 	 */
6467 	bdev_io->internal.in_submit_request = true;
6468 
6469 	bdev_nvme_submit_request(ch, bdev_io);
6470 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6471 	CU_ASSERT(bio->io_path == io_path2);
6472 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6473 
6474 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6475 	SPDK_CU_ASSERT_FATAL(req != NULL);
6476 
	/* Set retry count to non-zero so a failed I/O is requeued for retry. */
6478 	g_opts.bdev_retry_count = 2;
6479 
6480 	/* Inject an I/O error. */
6481 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6482 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6483 
6484 	/* The 2nd I/O should be queued to nbdev_ch. */
6485 	spdk_delay_us(1);
6486 	poll_thread_times(0, 1);
6487 
6488 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6489 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6490 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
6491 
6492 	/* The 2nd I/O should keep caching io_path2. */
6493 	CU_ASSERT(bio->io_path == io_path2);
6494 
6495 	/* The 2nd I/O should be submitted to io_path2 again. */
6496 	poll_thread_times(0, 1);
6497 
6498 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6499 	CU_ASSERT(bio->io_path == io_path2);
6500 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);
6501 
6502 	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
6503 	SPDK_CU_ASSERT_FATAL(req != NULL);
6504 
6505 	/* Inject an I/O error again. */
6506 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
6507 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
6508 	req->cpl.status.crd = 1;
6509 
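	/* crd = 1 in the completion status selects a controller-defined retry delay;
	 * here crdt[1] = 1 should correspond to 1 * 100 ms (CRDT is in 100 ms units
	 * per the NVMe spec), matching the 100000 us delay applied later in this test.
	 */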
6510 	ctrlr2->cdata.crdt[1] = 1;
6511 
6512 	/* The 2nd I/O should be queued to nbdev_ch. */
6513 	spdk_delay_us(1);
6514 	poll_thread_times(0, 1);
6515 
6516 	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
6517 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
6518 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
6519 
6520 	/* The 2nd I/O should keep caching io_path2. */
6521 	CU_ASSERT(bio->io_path == io_path2);
6522 
6523 	/* Detach ctrlr2 dynamically. */
6524 	rc = bdev_nvme_delete("nvme0", &path2);
6525 	CU_ASSERT(rc == 0);
6526 
6527 	spdk_delay_us(1000);
6528 	poll_threads();
6529 	spdk_delay_us(1000);
6530 	poll_threads();
6531 	spdk_delay_us(1000);
6532 	poll_threads();
6533 	spdk_delay_us(1000);
6534 	poll_threads();
6535 
6536 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
6537 
6538 	poll_threads();
6539 	spdk_delay_us(100000);
6540 	poll_threads();
6541 	spdk_delay_us(1);
6542 	poll_threads();
6543 
	/* The 2nd I/O should succeed via io_path1. */
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
6547 	CU_ASSERT(bio->io_path == io_path1);
6548 
6549 	free(bdev_io);
6550 
6551 	spdk_put_io_channel(ch);
6552 
6553 	poll_threads();
6554 	spdk_delay_us(1);
6555 	poll_threads();
6556 
6557 	rc = bdev_nvme_delete("nvme0", &g_any_path);
6558 	CU_ASSERT(rc == 0);
6559 
6560 	poll_threads();
6561 	spdk_delay_us(1000);
6562 	poll_threads();
6563 
6564 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
6565 
6566 	g_opts.nvme_ioq_poll_period_us = 0;
6567 	g_opts.bdev_retry_count = 0;
6568 }
6569 
/* This case verifies a fix for a complex race condition in which failover is
 * lost if the fabric connect command times out while the controller is being
 * reset.
 */
6574 static void
6575 test_race_between_reset_and_disconnected(void)
6576 {
6577 	struct spdk_nvme_transport_id trid = {};
6578 	struct spdk_nvme_ctrlr ctrlr = {};
6579 	struct nvme_ctrlr *nvme_ctrlr = NULL;
6580 	struct nvme_path_id *curr_trid;
6581 	struct spdk_io_channel *ch1, *ch2;
6582 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
6583 	int rc;
6584 
6585 	ut_init_trid(&trid);
6586 	TAILQ_INIT(&ctrlr.active_io_qpairs);
6587 
6588 	set_thread(0);
6589 
6590 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
6591 	CU_ASSERT(rc == 0);
6592 
6593 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
6594 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
6595 
6596 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
6597 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
6598 
6599 	ch1 = spdk_get_io_channel(nvme_ctrlr);
6600 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
6601 
6602 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
6603 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6604 
6605 	set_thread(1);
6606 
6607 	ch2 = spdk_get_io_channel(nvme_ctrlr);
6608 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
6609 
6610 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
6611 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6612 
6613 	/* Reset starts from thread 1. */
6614 	set_thread(1);
6615 
6616 	nvme_ctrlr->resetting = false;
6617 	curr_trid->last_failed_tsc = spdk_get_ticks();
6618 	ctrlr.is_failed = true;
6619 
6620 	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
6621 	CU_ASSERT(rc == 0);
6622 	CU_ASSERT(nvme_ctrlr->resetting == true);
6623 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
6624 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
6625 
6626 	poll_thread_times(0, 3);
6627 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
6628 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6629 
6630 	poll_thread_times(0, 1);
6631 	poll_thread_times(1, 1);
6632 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
6633 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
6634 	CU_ASSERT(ctrlr.is_failed == true);
6635 
6636 	poll_thread_times(1, 1);
6637 	poll_thread_times(0, 1);
6638 	CU_ASSERT(ctrlr.is_failed == false);
6639 	CU_ASSERT(ctrlr.adminq.is_connected == false);
6640 
6641 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6642 	poll_thread_times(0, 2);
6643 	CU_ASSERT(ctrlr.adminq.is_connected == true);
6644 
6645 	poll_thread_times(0, 1);
6646 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6647 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
6648 
6649 	poll_thread_times(1, 1);
6650 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6651 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6652 	CU_ASSERT(nvme_ctrlr->resetting == true);
6653 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
6654 
6655 	poll_thread_times(0, 2);
6656 	CU_ASSERT(nvme_ctrlr->resetting == true);
6657 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6658 	poll_thread_times(1, 1);
6659 	CU_ASSERT(nvme_ctrlr->resetting == true);
6660 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6661 
	/* At this point, only one poll remains before _bdev_nvme_reset_complete()
	 * is executed.
	 *
	 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric
	 * connect command is executed. If the fabric connect command gets a timeout,
	 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until
	 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false.
	 *
	 * Simulate a fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
	 */
6671 	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false);
6672 	CU_ASSERT(rc == -EINPROGRESS);
6673 	CU_ASSERT(nvme_ctrlr->resetting == true);
6674 	CU_ASSERT(nvme_ctrlr->pending_failover == true);
6675 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6676 
6677 	poll_thread_times(0, 1);
6678 
6679 	CU_ASSERT(nvme_ctrlr->resetting == true);
6680 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6681 	CU_ASSERT(curr_trid->last_failed_tsc != 0);
6682 
6683 	poll_threads();
6684 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6685 	poll_threads();
6686 
6687 	CU_ASSERT(nvme_ctrlr->resetting == false);
6688 	CU_ASSERT(nvme_ctrlr->pending_failover == false);
6689 	CU_ASSERT(curr_trid->last_failed_tsc == 0);
6690 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
6691 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
6692 
6693 	spdk_put_io_channel(ch2);
6694 
6695 	set_thread(0);
6696 
6697 	spdk_put_io_channel(ch1);
6698 
6699 	poll_threads();
6700 
6701 	rc = bdev_nvme_delete("nvme0", &g_any_path);
6702 	CU_ASSERT(rc == 0);
6703 
6704 	poll_threads();
6705 	spdk_delay_us(1000);
6706 	poll_threads();
6707 
6708 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
6711 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
6712 {
6713 	int *_rc = (int *)cb_arg;
6714 
6715 	SPDK_CU_ASSERT_FATAL(_rc != NULL);
6716 	*_rc = rc;
6717 }

static void
test_ctrlr_op_rpc(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int ctrlr_op_rc;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;
	ctrlr_op_rc = 0;

	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	poll_threads();

	CU_ASSERT(ctrlr_op_rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;
	ctrlr_op_rc = 0;

	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	poll_threads();

	CU_ASSERT(ctrlr_op_rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;
	ctrlr_op_rc = -1;

	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_op_rc == -1);

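	/* The RPC callback is invoked only after the reset completes. */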
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_op_rc == 0);

	/* Case 4: invalid operation. */
	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	poll_threads();

	CU_ASSERT(ctrlr_op_rc == -EINVAL);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_bdev_ctrlr_op_rpc(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL;
	struct nvme_path_id *curr_trid1, *curr_trid2;
	struct spdk_io_channel *ch11, *ch12, *ch21, *ch22;
	struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22;
	int ctrlr_op_rc;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr1.active_io_qpairs);
	TAILQ_INIT(&ctrlr2.active_io_qpairs);
	ctrlr1.cdata.cmic.multi_ctrlr = 1;
	ctrlr2.cdata.cmic.multi_ctrlr = 1;
	ctrlr1.cdata.cntlid = 1;
	ctrlr2.cdata.cntlid = 2;
	ctrlr1.adminq.is_connected = true;
	ctrlr2.adminq.is_connected = true;

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL);

	ch11 = spdk_get_io_channel(nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(ch11 != NULL);

	ctrlr_ch11 = spdk_io_channel_get_ctx(ch11);
	CU_ASSERT(ctrlr_ch11->qpair != NULL);

	set_thread(1);

	ch12 = spdk_get_io_channel(nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(ch12 != NULL);

	ctrlr_ch12 = spdk_io_channel_get_ctx(ch12);
	CU_ASSERT(ctrlr_ch12->qpair != NULL);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);

	curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL);

	ch21 = spdk_get_io_channel(nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(ch21 != NULL);

	ctrlr_ch21 = spdk_io_channel_get_ctx(ch21);
	CU_ASSERT(ctrlr_ch21->qpair != NULL);

	set_thread(1);

	ch22 = spdk_get_io_channel(nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(ch22 != NULL);

	ctrlr_ch22 = spdk_io_channel_get_ctx(ch22);
	CU_ASSERT(ctrlr_ch22->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	nvme_ctrlr1->resetting = false;
	nvme_ctrlr2->resetting = false;
	curr_trid1->last_failed_tsc = spdk_get_ticks();
	curr_trid2->last_failed_tsc = spdk_get_ticks();
	ctrlr_op_rc = -1;

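	/* nvme_bdev_ctrlr_op_rpc() executes the operation on the ctrlrs under
	 * nbdev_ctrlr one by one. ctrlr1 should start resetting first while
	 * ctrlr2 is left untouched.
	 */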
	nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET,
			       ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(ctrlr_ch11->qpair != NULL);
	CU_ASSERT(ctrlr_ch12->qpair != NULL);
	CU_ASSERT(nvme_ctrlr2->resetting == false);

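	/* The I/O qpair of ctrlr1 is disconnected on thread 0 first and then on thread 1. */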
	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);

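	/* Then the admin qpair of ctrlr1 is disconnected. */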
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr1.adminq.is_connected == false);

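	/* The admin qpair is connected again after the admin poll period passes. */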
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr1.adminq.is_connected == true);

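	/* The I/O qpairs are recreated, first on thread 0 and then on thread 1. */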
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(curr_trid1->last_failed_tsc != 0);

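	/* A few more polls complete the reset of ctrlr1 and then start the reset
	 * of ctrlr2 automatically.
	 */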
	poll_thread_times(0, 2);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);

	CU_ASSERT(nvme_ctrlr1->resetting == false);
	CU_ASSERT(curr_trid1->last_failed_tsc == 0);
	CU_ASSERT(nvme_ctrlr2->resetting == true);

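	/* Complete the reset of ctrlr2. The callback is invoked with the final result. */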
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr2->resetting == false);
	CU_ASSERT(ctrlr_op_rc == 0);

	set_thread(1);

	spdk_put_io_channel(ch12);
	spdk_put_io_channel(ch22);

	set_thread(0);

	spdk_put_io_channel(ch11);
	spdk_put_io_channel(ch21);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_disable_enable_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);
	ctrlr.adminq.is_connected = true;

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Disable starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already disabled. */
	nvme_ctrlr->disabled = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EALREADY);

	/* Case 2: ctrlr is already being destructed. */
	nvme_ctrlr->disabled = false;
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 3: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 4: disable completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

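	/* The I/O qpair is disconnected on thread 0 first and then on thread 1. */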
	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

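	/* Then the admin qpair is disconnected and the remaining polls complete
	 * the disable. The admin qpair is not reconnected while the ctrlr is
	 * disabled.
	 */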
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.adminq.is_connected == false);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->disabled == true);

	/* Case 5: enable completes successfully. */
	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->disabled == false);

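	/* The admin qpair is connected again after the admin poll period passes. */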
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

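	/* The I/O qpairs are recreated, first on thread 0 and then on thread 1. */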
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

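	/* The last few polls complete the enable and clear resetting. */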
	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 6: ctrlr is already enabled. */
	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EALREADY);

	set_thread(0);

	/* Case 7: disable cancels delayed reconnect. */
	nvme_ctrlr->opts.reconnect_delay_sec = 10;
	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

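	/* The reset fails because ctrlr.fail_reset is true, so a delayed
	 * reconnect is scheduled instead.
	 */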
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);

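	/* Disabling the ctrlr cancels the delayed reconnect and starts the disable immediately. */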
	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->disabled == true);

	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->disabled == false);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	set_thread(1);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_add_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);
	CU_ADD_TEST(suite, test_init_ana_log_page);
	CU_ADD_TEST(suite, test_get_memory_domains);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
	CU_ADD_TEST(suite, test_admin_path);
	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
	CU_ADD_TEST(suite, test_find_io_path);
	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
	CU_ADD_TEST(suite, test_retry_io_count);
	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
	CU_ADD_TEST(suite, test_reconnect_ctrlr);
	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
	CU_ADD_TEST(suite, test_fail_path);
	CU_ADD_TEST(suite, test_nvme_ns_cmp);
	CU_ADD_TEST(suite, test_ana_transition);
	CU_ADD_TEST(suite, test_set_preferred_path);
	CU_ADD_TEST(suite, test_find_next_io_path);
	CU_ADD_TEST(suite, test_find_io_path_min_qd);
	CU_ADD_TEST(suite, test_disable_auto_failback);
	CU_ADD_TEST(suite, test_set_multipath_policy);
	CU_ADD_TEST(suite, test_uuid_generation);
	CU_ADD_TEST(suite, test_retry_io_to_same_path);
	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
	CU_ADD_TEST(suite, test_disable_enable_ctrlr);

	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	num_failures = spdk_ut_run_tests(argc, argv, NULL);

	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	CU_cleanup_registry();

	return num_failures;
}