xref: /spdk/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c (revision 0098e636761237b77c12c30c2408263a5d2260cc)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 #include "spdk_cunit.h"
9 #include "spdk/thread.h"
10 #include "spdk/bdev_module.h"
12 
13 #include "common/lib/ut_multithread.c"
14 
15 #include "bdev/nvme/bdev_nvme.c"
16 
17 #include "unit/lib/json_mock.c"
18 
19 static void *g_accel_p = (void *)0xdeadbeaf;
20 
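/* The DEFINE_STUB()/DEFINE_STUB_V() macros below replace the SPDK NVMe driver,
 * accel, and bdev APIs that bdev_nvme.c calls, so the module can be compiled and
 * exercised in isolation. APIs whose behavior matters to the tests are implemented
 * by hand further down instead of being stubbed.
 */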
21 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
22 	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
23 	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
24 	     spdk_nvme_remove_cb remove_cb), NULL);
25 
26 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
27 		enum spdk_nvme_transport_type trtype));
28 
29 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
30 	    NULL);
31 
32 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
33 
34 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
35 		struct spdk_nvme_transport_id *trid), 0);
36 
37 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
38 		spdk_nvme_remove_cb remove_cb, void *remove_ctx));
39 
40 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
41 
42 DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
43 DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));
44 
45 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, int);
46 
47 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
48 	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);
49 
50 int
51 spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
52 				   struct spdk_memory_domain **domains, int array_size)
53 {
54 	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain);
55 
56 	return 0;
57 }
58 
59 struct spdk_io_channel *
60 spdk_accel_engine_get_io_channel(void)
61 {
62 	return spdk_get_io_channel(g_accel_p);
63 }
64 
65 void
66 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
67 		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
68 {
69 	/* Avoid warning that opts is used uninitialised */
70 	memset(opts, 0, opts_size);
71 }
72 
73 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
74 	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
75 
76 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
77 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
78 
79 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
80 	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
81 
82 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
83 		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));
84 
85 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
86 		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));
87 
88 DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);
89 
90 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);
91 
92 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
93 		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
94 
95 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
96 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
97 		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
98 
99 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
100 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
101 		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
102 
103 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
104 		size_t *size), 0);
105 
106 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
107 
108 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
109 
110 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
111 
112 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);
113 
114 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);
115 
116 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
117 
118 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
119 	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);
120 
121 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);
122 
123 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
124 		char *name, size_t *size), 0);
125 
126 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
127 	    (struct spdk_nvme_ns *ns), 0);
128 
129 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
130 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
131 
132 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
133 	    (struct spdk_nvme_ns *ns), 0);
134 
135 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
136 	    (struct spdk_nvme_ns *ns), 0);
137 
138 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
139 	    (struct spdk_nvme_ns *ns), 0);
140 
141 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
142 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
143 	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
144 	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);
145 
146 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
147 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
148 	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
149 	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
150 	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);
151 
152 DEFINE_STUB(spdk_nvme_zns_report_zones, int,
153 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
154 	     void *payload, uint32_t payload_size, uint64_t slba,
155 	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
156 	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
157 
158 DEFINE_STUB(spdk_nvme_zns_close_zone, int,
159 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
160 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
161 
162 DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
163 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
164 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
165 
166 DEFINE_STUB(spdk_nvme_zns_open_zone, int,
167 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
168 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
169 
170 DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
171 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
172 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
173 
174 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);
175 
176 DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
177 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
178 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
179 
180 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));
181 
182 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
183 
184 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
185 
186 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);
187 
188 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));
189 
190 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
191 		struct iovec *iov,
192 		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
193 
194 DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));
195 
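/* Test-local definitions of the otherwise opaque NVMe driver structures, plus
 * ut_nvme_req for tracking queued commands. Only the fields that bdev_nvme.c and
 * the tests below actually touch are modeled (outstanding requests, connection
 * state, attach/failure flags, etc.).
 */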
196 struct ut_nvme_req {
197 	uint16_t			opc;
198 	spdk_nvme_cmd_cb		cb_fn;
199 	void				*cb_arg;
200 	struct spdk_nvme_cpl		cpl;
201 	TAILQ_ENTRY(ut_nvme_req)	tailq;
202 };
203 
204 struct spdk_nvme_ns {
205 	struct spdk_nvme_ctrlr		*ctrlr;
206 	uint32_t			id;
207 	bool				is_active;
208 	struct spdk_uuid		*uuid;
209 	enum spdk_nvme_ana_state	ana_state;
210 	enum spdk_nvme_csi		csi;
211 };
212 
213 struct spdk_nvme_qpair {
214 	struct spdk_nvme_ctrlr		*ctrlr;
215 	uint8_t				failure_reason;
216 	bool				is_connected;
217 	bool				in_completion_context;
218 	bool				delete_after_completion_context;
219 	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
220 	uint32_t			num_outstanding_reqs;
221 	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
222 	struct spdk_nvme_poll_group	*poll_group;
223 	void				*poll_group_tailq_head;
224 	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
225 };
226 
227 struct spdk_nvme_ctrlr {
228 	uint32_t			num_ns;
229 	struct spdk_nvme_ns		*ns;
230 	struct spdk_nvme_ns_data	*nsdata;
231 	struct spdk_nvme_qpair		adminq;
232 	struct spdk_nvme_ctrlr_data	cdata;
233 	bool				attached;
234 	bool				is_failed;
235 	bool				fail_reset;
236 	struct spdk_nvme_transport_id	trid;
237 	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
238 	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
239 	struct spdk_nvme_ctrlr_opts	opts;
240 };
241 
242 struct spdk_nvme_poll_group {
243 	void				*ctx;
244 	struct spdk_nvme_accel_fn_table	accel_fn_table;
245 	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
246 	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
247 };
248 
249 struct spdk_nvme_probe_ctx {
250 	struct spdk_nvme_transport_id	trid;
251 	void				*cb_ctx;
252 	spdk_nvme_attach_cb		attach_cb;
253 	struct spdk_nvme_ctrlr		*init_ctrlr;
254 };
255 
256 uint32_t
257 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
258 {
259 	uint32_t nsid;
260 
261 	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
262 		if (ctrlr->ns[nsid - 1].is_active) {
263 			return nsid;
264 		}
265 	}
266 
267 	return 0;
268 }
269 
270 uint32_t
271 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
272 {
273 	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
274 		if (ctrlr->ns[nsid - 1].is_active) {
275 			return nsid;
276 		}
277 	}
278 
279 	return 0;
280 }
281 
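/* Global test state: controllers created by ut_attach_ctrlr() start on
 * g_ut_init_ctrlrs and are moved to g_ut_attached_ctrlrs by the fake probe
 * polling below. The remaining variables let each test steer attach and
 * bdev registration results.
 */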
282 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
283 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
284 			g_ut_attached_ctrlrs);
285 static int g_ut_attach_ctrlr_status;
286 static size_t g_ut_attach_bdev_count;
287 static int g_ut_register_bdev_status;
288 static struct spdk_bdev *g_ut_registered_bdev;
289 static uint16_t g_ut_cntlid;
290 static struct nvme_path_id g_any_path = {};
291 
292 static void
293 ut_init_trid(struct spdk_nvme_transport_id *trid)
294 {
295 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
296 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
297 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
298 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
299 }
300 
301 static void
302 ut_init_trid2(struct spdk_nvme_transport_id *trid)
303 {
304 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
305 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
306 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
307 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
308 }
309 
310 static void
311 ut_init_trid3(struct spdk_nvme_transport_id *trid)
312 {
313 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
314 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
315 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
316 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
317 }
318 
319 static int
320 cmp_int(int a, int b)
321 {
322 	return a - b;
323 }
324 
325 int
326 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
327 			       const struct spdk_nvme_transport_id *trid2)
328 {
329 	int cmp;
330 
331 	/* We assume trtype is TCP for now. */
332 	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);
333 
334 	cmp = cmp_int(trid1->trtype, trid2->trtype);
335 	if (cmp) {
336 		return cmp;
337 	}
338 
339 	cmp = strcasecmp(trid1->traddr, trid2->traddr);
340 	if (cmp) {
341 		return cmp;
342 	}
343 
344 	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
345 	if (cmp) {
346 		return cmp;
347 	}
348 
349 	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
350 	if (cmp) {
351 		return cmp;
352 	}
353 
354 	cmp = strcmp(trid1->subnqn, trid2->subnqn);
355 	if (cmp) {
356 		return cmp;
357 	}
358 
359 	return 0;
360 }
361 
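/* Simulate a controller appearing on the fabric: allocate a fake ctrlr with the
 * given number of namespaces and queue it on g_ut_init_ctrlrs, where the fake
 * probe polling will "discover" it. Returns NULL if a ctrlr with the same trid
 * already exists or allocation fails.
 */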
362 static struct spdk_nvme_ctrlr *
363 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
364 		bool ana_reporting, bool multipath)
365 {
366 	struct spdk_nvme_ctrlr *ctrlr;
367 	uint32_t i;
368 
369 	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
370 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
371 			/* There is a ctrlr whose trid matches. */
372 			return NULL;
373 		}
374 	}
375 
376 	ctrlr = calloc(1, sizeof(*ctrlr));
377 	if (ctrlr == NULL) {
378 		return NULL;
379 	}
380 
381 	ctrlr->attached = true;
382 	ctrlr->adminq.ctrlr = ctrlr;
383 	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
384 	ctrlr->adminq.is_connected = true;
385 
386 	if (num_ns != 0) {
387 		ctrlr->num_ns = num_ns;
388 		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
389 		if (ctrlr->ns == NULL) {
390 			free(ctrlr);
391 			return NULL;
392 		}
393 
394 		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
395 		if (ctrlr->nsdata == NULL) {
396 			free(ctrlr->ns);
397 			free(ctrlr);
398 			return NULL;
399 		}
400 
401 		for (i = 0; i < num_ns; i++) {
402 			ctrlr->ns[i].id = i + 1;
403 			ctrlr->ns[i].ctrlr = ctrlr;
404 			ctrlr->ns[i].is_active = true;
405 			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
406 			ctrlr->nsdata[i].nsze = 1024;
407 			ctrlr->nsdata[i].nmic.can_share = multipath;
408 		}
409 
410 		ctrlr->cdata.nn = num_ns;
411 		ctrlr->cdata.mnan = num_ns;
412 		ctrlr->cdata.nanagrpid = num_ns;
413 	}
414 
415 	ctrlr->cdata.cntlid = ++g_ut_cntlid;
416 	ctrlr->cdata.cmic.multi_ctrlr = multipath;
417 	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
418 	ctrlr->trid = *trid;
419 	TAILQ_INIT(&ctrlr->active_io_qpairs);
420 
421 	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);
422 
423 	return ctrlr;
424 }
425 
426 static void
427 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
428 {
429 	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));
430 
431 	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
432 	free(ctrlr->nsdata);
433 	free(ctrlr->ns);
434 	free(ctrlr);
435 }
436 
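/* Queue a fake NVMe request on the qpair. It stays outstanding until the test
 * calls spdk_nvme_qpair_process_completions(), which lets each test decide when
 * (and with which completion status) commands complete.
 */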
437 static int
438 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
439 		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
440 {
441 	struct ut_nvme_req *req;
442 
443 	req = calloc(1, sizeof(*req));
444 	if (req == NULL) {
445 		return -ENOMEM;
446 	}
447 
448 	req->opc = opc;
449 	req->cb_fn = cb_fn;
450 	req->cb_arg = cb_arg;
451 
452 	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
453 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
454 
455 	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
456 	qpair->num_outstanding_reqs++;
457 
458 	return 0;
459 }
460 
461 static struct ut_nvme_req *
462 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
463 {
464 	struct ut_nvme_req *req;
465 
466 	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
467 		if (req->cb_arg == cb_arg) {
468 			break;
469 		}
470 	}
471 
472 	return req;
473 }
474 
475 static struct spdk_bdev_io *
476 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
477 		 struct spdk_io_channel *ch)
478 {
479 	struct spdk_bdev_io *bdev_io;
480 
481 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
482 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
483 	bdev_io->type = type;
484 	bdev_io->bdev = &nbdev->disk;
485 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
486 
487 	return bdev_io;
488 }
489 
490 static void
491 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
492 {
493 	bdev_io->u.bdev.iovs = &bdev_io->iov;
494 	bdev_io->u.bdev.iovcnt = 1;
495 
496 	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
497 	bdev_io->iov.iov_len = 4096;
498 }
499 
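/* Fake probe path: spdk_nvme_probe_poll_async() below moves matching ctrlrs from
 * g_ut_init_ctrlrs to g_ut_attached_ctrlrs and invokes the attach callback, unless
 * the ctrlr was marked failed, in which case it is simply freed.
 */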
500 static void
501 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
502 {
503 	if (ctrlr->is_failed) {
504 		free(ctrlr);
505 		return;
506 	}
507 
508 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
509 	if (probe_ctx->cb_ctx) {
510 		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
511 	}
512 
513 	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);
514 
515 	if (probe_ctx->attach_cb) {
516 		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
517 	}
518 }
519 
520 int
521 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
522 {
523 	struct spdk_nvme_ctrlr *ctrlr, *tmp;
524 
525 	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
526 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
527 			continue;
528 		}
529 		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
530 		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
531 	}
532 
533 	free(probe_ctx);
534 
535 	return 0;
536 }
537 
538 struct spdk_nvme_probe_ctx *
539 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
540 			const struct spdk_nvme_ctrlr_opts *opts,
541 			spdk_nvme_attach_cb attach_cb)
542 {
543 	struct spdk_nvme_probe_ctx *probe_ctx;
544 
545 	if (trid == NULL) {
546 		return NULL;
547 	}
548 
549 	probe_ctx = calloc(1, sizeof(*probe_ctx));
550 	if (probe_ctx == NULL) {
551 		return NULL;
552 	}
553 
554 	probe_ctx->trid = *trid;
555 	probe_ctx->cb_ctx = (void *)opts;
556 	probe_ctx->attach_cb = attach_cb;
557 
558 	return probe_ctx;
559 }
560 
561 int
562 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
563 {
564 	if (ctrlr->attached) {
565 		ut_detach_ctrlr(ctrlr);
566 	}
567 
568 	return 0;
569 }
570 
571 int
572 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
573 {
574 	SPDK_CU_ASSERT_FATAL(ctx != NULL);
575 	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;
576 
577 	return 0;
578 }
579 
580 int
581 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
582 {
583 	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
584 }
585 
586 void
587 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
588 {
589 	memset(opts, 0, opts_size);
590 
591 	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
592 		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
593 }
594 
595 const struct spdk_nvme_ctrlr_data *
596 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
597 {
598 	return &ctrlr->cdata;
599 }
600 
601 uint32_t
602 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
603 {
604 	return ctrlr->num_ns;
605 }
606 
607 struct spdk_nvme_ns *
608 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
609 {
610 	if (nsid < 1 || nsid > ctrlr->num_ns) {
611 		return NULL;
612 	}
613 
614 	return &ctrlr->ns[nsid - 1];
615 }
616 
617 bool
618 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
619 {
620 	if (nsid < 1 || nsid > ctrlr->num_ns) {
621 		return false;
622 	}
623 
624 	return ctrlr->ns[nsid - 1].is_active;
625 }
626 
627 union spdk_nvme_csts_register
628 	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
629 {
630 	union spdk_nvme_csts_register csts;
631 
632 	csts.raw = 0;
633 
634 	return csts;
635 }
636 
637 union spdk_nvme_vs_register
638 	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
639 {
640 	union spdk_nvme_vs_register vs;
641 
642 	vs.raw = 0;
643 
644 	return vs;
645 }
646 
647 struct spdk_nvme_qpair *
648 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
649 			       const struct spdk_nvme_io_qpair_opts *user_opts,
650 			       size_t opts_size)
651 {
652 	struct spdk_nvme_qpair *qpair;
653 
654 	qpair = calloc(1, sizeof(*qpair));
655 	if (qpair == NULL) {
656 		return NULL;
657 	}
658 
659 	qpair->ctrlr = ctrlr;
660 	TAILQ_INIT(&qpair->outstanding_reqs);
661 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
662 
663 	return qpair;
664 }
665 
666 static void
667 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
668 {
669 	struct spdk_nvme_poll_group *group = qpair->poll_group;
670 
671 	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);
672 
673 	qpair->poll_group_tailq_head = &group->connected_qpairs;
674 	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
675 	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
676 }
677 
678 static void
679 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
680 {
681 	struct spdk_nvme_poll_group *group = qpair->poll_group;
682 
683 	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);
684 
685 	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
686 	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
687 	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
688 }
689 
690 int
691 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
692 				 struct spdk_nvme_qpair *qpair)
693 {
694 	if (qpair->is_connected) {
695 		return -EISCONN;
696 	}
697 
698 	qpair->is_connected = true;
699 	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
700 
701 	if (qpair->poll_group) {
702 		nvme_poll_group_connect_qpair(qpair);
703 	}
704 
705 	return 0;
706 }
707 
708 void
709 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
710 {
711 	if (!qpair->is_connected) {
712 		return;
713 	}
714 
715 	qpair->is_connected = false;
716 
717 	if (qpair->poll_group != NULL) {
718 		nvme_poll_group_disconnect_qpair(qpair);
719 	}
720 }
721 
722 int
723 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
724 {
725 	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);
726 
727 	if (qpair->in_completion_context) {
728 		qpair->delete_after_completion_context = true;
729 		return 0;
730 	}
731 
732 	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
733 
734 	if (qpair->poll_group != NULL) {
735 		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
736 	}
737 
738 	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);
739 
740 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
741 
742 	free(qpair);
743 
744 	return 0;
745 }
746 
747 int
748 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
749 {
750 	if (ctrlr->fail_reset) {
751 		ctrlr->is_failed = true;
752 		return -EIO;
753 	}
754 
755 	ctrlr->adminq.is_connected = true;
756 	return 0;
757 }
758 
759 void
760 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
761 {
762 }
763 
764 int
765 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
766 {
767 	ctrlr->adminq.is_connected = false;
768 	ctrlr->is_failed = false;
769 
770 	return 0;
771 }
772 
773 void
774 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
775 {
776 	ctrlr->is_failed = true;
777 }
778 
779 bool
780 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
781 {
782 	return ctrlr->is_failed;
783 }
784 
785 spdk_nvme_qp_failure_reason
786 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
787 {
788 	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
789 }
790 
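/* Build a fake ANA log page: a header followed by one group descriptor per active
 * namespace, each containing a single NSID equal to its group ID. This mirrors
 * ut_attach_ctrlr(), which sets nanagrpid equal to the namespace count.
 */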
791 #define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
792 				 sizeof(uint32_t))
793 static void
794 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
795 {
796 	struct spdk_nvme_ana_page ana_hdr;
797 	char _ana_desc[UT_ANA_DESC_SIZE];
798 	struct spdk_nvme_ana_group_descriptor *ana_desc;
799 	struct spdk_nvme_ns *ns;
800 	uint32_t i;
801 
802 	memset(&ana_hdr, 0, sizeof(ana_hdr));
803 	ana_hdr.num_ana_group_desc = ctrlr->num_ns;
804 
805 	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
806 	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));
807 
808 	buf += sizeof(ana_hdr);
809 	length -= sizeof(ana_hdr);
810 
811 	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;
812 
813 	for (i = 0; i < ctrlr->num_ns; i++) {
814 		ns = &ctrlr->ns[i];
815 
816 		if (!ns->is_active) {
817 			continue;
818 		}
819 
820 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
821 
822 		ana_desc->ana_group_id = ns->id;
823 		ana_desc->num_of_nsid = 1;
824 		ana_desc->ana_state = ns->ana_state;
825 		ana_desc->nsid[0] = ns->id;
826 
827 		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
828 		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);
829 
830 		buf += UT_ANA_DESC_SIZE;
831 		length -= UT_ANA_DESC_SIZE;
832 	}
833 }
834 
835 int
836 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
837 				 uint8_t log_page, uint32_t nsid,
838 				 void *payload, uint32_t payload_size,
839 				 uint64_t offset,
840 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
841 {
842 	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
843 		SPDK_CU_ASSERT_FATAL(offset == 0);
844 		ut_create_ana_log_page(ctrlr, payload, payload_size);
845 	}
846 
847 	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
848 				      cb_fn, cb_arg);
849 }
850 
851 int
852 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
853 			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
854 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
855 {
856 	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
857 }
858 
859 int
860 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
861 			      void *cmd_cb_arg,
862 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
863 {
864 	struct ut_nvme_req *req = NULL, *abort_req;
865 
866 	if (qpair == NULL) {
867 		qpair = &ctrlr->adminq;
868 	}
869 
870 	abort_req = calloc(1, sizeof(*abort_req));
871 	if (abort_req == NULL) {
872 		return -ENOMEM;
873 	}
874 
875 	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
876 		if (req->cb_arg == cmd_cb_arg) {
877 			break;
878 		}
879 	}
880 
881 	if (req == NULL) {
882 		free(abort_req);
883 		return -ENOENT;
884 	}
885 
886 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
887 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
888 
889 	abort_req->opc = SPDK_NVME_OPC_ABORT;
890 	abort_req->cb_fn = cb_fn;
891 	abort_req->cb_arg = cb_arg;
892 
893 	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
894 	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
895 	abort_req->cpl.cdw0 = 0;
896 
897 	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
898 	ctrlr->adminq.num_outstanding_reqs++;
899 
900 	return 0;
901 }
902 
903 int32_t
904 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
905 {
906 	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
907 }
908 
909 uint32_t
910 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
911 {
912 	return ns->id;
913 }
914 
915 struct spdk_nvme_ctrlr *
916 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
917 {
918 	return ns->ctrlr;
919 }
920 
921 static inline struct spdk_nvme_ns_data *
922 _nvme_ns_get_data(struct spdk_nvme_ns *ns)
923 {
924 	return &ns->ctrlr->nsdata[ns->id - 1];
925 }
926 
927 const struct spdk_nvme_ns_data *
928 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
929 {
930 	return _nvme_ns_get_data(ns);
931 }
932 
933 uint64_t
934 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
935 {
936 	return _nvme_ns_get_data(ns)->nsze;
937 }
938 
939 const struct spdk_uuid *
940 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
941 {
942 	return ns->uuid;
943 }
944 
945 enum spdk_nvme_csi
946 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
947 	return ns->csi;
948 }
949 
950 int
951 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
952 			      void *metadata, uint64_t lba, uint32_t lba_count,
953 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
954 			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
955 {
956 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
957 }
958 
959 int
960 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
961 			       void *buffer, void *metadata, uint64_t lba,
962 			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
963 			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
964 {
965 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
966 }
967 
968 int
969 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
970 			       uint64_t lba, uint32_t lba_count,
971 			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
972 			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
973 			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
974 			       uint16_t apptag_mask, uint16_t apptag)
975 {
976 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
977 }
978 
979 int
980 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
981 				uint64_t lba, uint32_t lba_count,
982 				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
983 				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
984 				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
985 				uint16_t apptag_mask, uint16_t apptag)
986 {
987 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
988 }
989 
990 static bool g_ut_readv_ext_called;
991 int
992 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
993 			   uint64_t lba, uint32_t lba_count,
994 			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
995 			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
996 			   spdk_nvme_req_next_sge_cb next_sge_fn,
997 			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
998 {
999 	g_ut_readv_ext_called = true;
1000 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
1001 }
1002 
1003 static bool g_ut_writev_ext_called;
1004 int
1005 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1006 			    uint64_t lba, uint32_t lba_count,
1007 			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1008 			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1009 			    spdk_nvme_req_next_sge_cb next_sge_fn,
1010 			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
1011 {
1012 	g_ut_writev_ext_called = true;
1013 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
1014 }
1015 
1016 int
1017 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1018 				  uint64_t lba, uint32_t lba_count,
1019 				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1020 				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1021 				  spdk_nvme_req_next_sge_cb next_sge_fn,
1022 				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
1023 {
1024 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
1025 }
1026 
1027 int
1028 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1029 				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
1030 				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1031 {
1032 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
1033 }
1034 
1035 int
1036 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1037 			      uint64_t lba, uint32_t lba_count,
1038 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1039 			      uint32_t io_flags)
1040 {
1041 	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
1042 }
1043 
1044 struct spdk_nvme_poll_group *
1045 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
1046 {
1047 	struct spdk_nvme_poll_group *group;
1048 
1049 	group = calloc(1, sizeof(*group));
1050 	if (group == NULL) {
1051 		return NULL;
1052 	}
1053 
1054 	group->ctx = ctx;
1055 	if (table != NULL) {
1056 		group->accel_fn_table = *table;
1057 	}
1058 	TAILQ_INIT(&group->connected_qpairs);
1059 	TAILQ_INIT(&group->disconnected_qpairs);
1060 
1061 	return group;
1062 }
1063 
1064 int
1065 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
1066 {
1067 	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
1068 	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
1069 		return -EBUSY;
1070 	}
1071 
1072 	free(group);
1073 
1074 	return 0;
1075 }
1076 
1077 spdk_nvme_qp_failure_reason
1078 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
1079 {
1080 	return qpair->failure_reason;
1081 }
1082 
1083 int32_t
1084 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
1085 				    uint32_t max_completions)
1086 {
1087 	struct ut_nvme_req *req, *tmp;
1088 	uint32_t num_completions = 0;
1089 
1090 	if (!qpair->is_connected) {
1091 		return -ENXIO;
1092 	}
1093 
1094 	qpair->in_completion_context = true;
1095 
1096 	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
1097 		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
1098 		qpair->num_outstanding_reqs--;
1099 
1100 		req->cb_fn(req->cb_arg, &req->cpl);
1101 
1102 		free(req);
1103 		num_completions++;
1104 	}
1105 
1106 	qpair->in_completion_context = false;
1107 	if (qpair->delete_after_completion_context) {
1108 		spdk_nvme_ctrlr_free_io_qpair(qpair);
1109 	}
1110 
1111 	return num_completions;
1112 }
1113 
1114 int64_t
1115 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
1116 		uint32_t completions_per_qpair,
1117 		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
1118 {
1119 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
1120 	int64_t local_completions = 0, error_reason = 0, num_completions = 0;
1121 
1122 	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);
1123 
1124 	if (disconnected_qpair_cb == NULL) {
1125 		return -EINVAL;
1126 	}
1127 
1128 	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
1129 		disconnected_qpair_cb(qpair, group->ctx);
1130 	}
1131 
1132 	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
1133 		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
1134 			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
1135 			/* Bump the number of completions so this counts as "busy" */
1136 			num_completions++;
1137 			continue;
1138 		}
1139 
1140 		local_completions = spdk_nvme_qpair_process_completions(qpair,
1141 				    completions_per_qpair);
1142 		if (local_completions < 0 && error_reason == 0) {
1143 			error_reason = local_completions;
1144 		} else {
1145 			num_completions += local_completions;
1146 			assert(num_completions >= 0);
1147 		}
1148 	}
1149 
1150 	return error_reason ? error_reason : num_completions;
1151 }
1152 
1153 int
1154 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
1155 			 struct spdk_nvme_qpair *qpair)
1156 {
1157 	CU_ASSERT(!qpair->is_connected);
1158 
1159 	qpair->poll_group = group;
1160 	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
1161 	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
1162 
1163 	return 0;
1164 }
1165 
1166 int
1167 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
1168 			    struct spdk_nvme_qpair *qpair)
1169 {
1170 	CU_ASSERT(!qpair->is_connected);
1171 
1172 	if (qpair->poll_group == NULL) {
1173 		return -ENOENT;
1174 	}
1175 
1176 	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);
1177 
1178 	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
1179 
1180 	qpair->poll_group = NULL;
1181 	qpair->poll_group_tailq_head = NULL;
1182 
1183 	return 0;
1184 }
1185 
1186 int
1187 spdk_bdev_register(struct spdk_bdev *bdev)
1188 {
1189 	g_ut_registered_bdev = bdev;
1190 
1191 	return g_ut_register_bdev_status;
1192 }
1193 
1194 void
1195 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
1196 {
1197 	int rc;
1198 
1199 	rc = bdev->fn_table->destruct(bdev->ctxt);
1200 
1201 	if (bdev == g_ut_registered_bdev) {
1202 		g_ut_registered_bdev = NULL;
1203 	}
1204 
1205 	if (rc <= 0 && cb_fn != NULL) {
1206 		cb_fn(cb_arg, rc);
1207 	}
1208 }
1209 
1210 int
1211 spdk_bdev_open_ext(const char *bdev_name, bool write,
1212 		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
1213 		   struct spdk_bdev_desc **desc)
1214 {
1215 	if (g_ut_registered_bdev == NULL ||
1216 	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
1217 		return -ENODEV;
1218 	}
1219 
1220 	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;
1221 
1222 	return 0;
1223 }
1224 
1225 struct spdk_bdev *
1226 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
1227 {
1228 	return (struct spdk_bdev *)desc;
1229 }
1230 
1231 int
1232 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
1233 {
1234 	bdev->blockcnt = size;
1235 
1236 	return 0;
1237 }
1238 
1239 struct spdk_io_channel *
1240 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
1241 {
1242 	return (struct spdk_io_channel *)bdev_io->internal.ch;
1243 }
1244 
1245 void
1246 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
1247 {
1248 	bdev_io->internal.status = status;
1249 	bdev_io->internal.in_submit_request = false;
1250 }
1251 
1252 void
1253 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
1254 {
1255 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
1256 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1257 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
1258 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
1259 	} else {
1260 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
1261 	}
1262 
1263 	bdev_io->internal.error.nvme.cdw0 = cdw0;
1264 	bdev_io->internal.error.nvme.sct = sct;
1265 	bdev_io->internal.error.nvme.sc = sc;
1266 
1267 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
1268 }
1269 
1270 void
1271 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1272 {
1273 	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
1274 
1275 	ut_bdev_io_set_buf(bdev_io);
1276 
1277 	cb(ch, bdev_io, true);
1278 }
1279 
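/* Test cases. They run on the lightweight multi-thread framework from
 * ut_multithread.c: set_thread() selects the current SPDK thread and
 * poll_threads()/poll_thread_times() drive message passing and pollers.
 */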
1280 static void
1281 test_create_ctrlr(void)
1282 {
1283 	struct spdk_nvme_transport_id trid = {};
1284 	struct spdk_nvme_ctrlr ctrlr = {};
1285 	int rc;
1286 
1287 	ut_init_trid(&trid);
1288 
1289 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1290 	CU_ASSERT(rc == 0);
1291 
1292 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
1293 
1294 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1295 	CU_ASSERT(rc == 0);
1296 
1297 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
1298 
1299 	poll_threads();
1300 	spdk_delay_us(1000);
1301 	poll_threads();
1302 
1303 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1304 }
1305 
1306 static void
1307 test_reset_ctrlr(void)
1308 {
1309 	struct spdk_nvme_transport_id trid = {};
1310 	struct spdk_nvme_ctrlr ctrlr = {};
1311 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1312 	struct nvme_path_id *curr_trid;
1313 	struct spdk_io_channel *ch1, *ch2;
1314 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1315 	int rc;
1316 
1317 	ut_init_trid(&trid);
1318 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1319 
1320 	set_thread(0);
1321 
1322 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1323 	CU_ASSERT(rc == 0);
1324 
1325 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1326 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1327 
1328 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1329 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1330 
1331 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1332 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1333 
1334 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
1335 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
1336 
1337 	set_thread(1);
1338 
1339 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1340 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1341 
1342 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
1343 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
1344 
1345 	/* Reset starts from thread 1. */
1346 	set_thread(1);
1347 
1348 	/* Case 1: ctrlr is already being destructed. */
1349 	nvme_ctrlr->destruct = true;
1350 
1351 	rc = bdev_nvme_reset(nvme_ctrlr);
1352 	CU_ASSERT(rc == -ENXIO);
1353 
1354 	/* Case 2: reset is in progress. */
1355 	nvme_ctrlr->destruct = false;
1356 	nvme_ctrlr->resetting = true;
1357 
1358 	rc = bdev_nvme_reset(nvme_ctrlr);
1359 	CU_ASSERT(rc == -EBUSY);
1360 
1361 	/* Case 3: reset completes successfully. */
1362 	nvme_ctrlr->resetting = false;
1363 	curr_trid->is_failed = true;
1364 	ctrlr.is_failed = true;
1365 
1366 	rc = bdev_nvme_reset(nvme_ctrlr);
1367 	CU_ASSERT(rc == 0);
1368 	CU_ASSERT(nvme_ctrlr->resetting == true);
1369 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
1370 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
1371 
1372 	poll_thread_times(0, 3);
1373 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
1374 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
1375 
1376 	poll_thread_times(0, 1);
1377 	poll_thread_times(1, 1);
1378 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
1379 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
1380 	CU_ASSERT(ctrlr.is_failed == true);
1381 
1382 	poll_thread_times(1, 1);
1383 	poll_thread_times(0, 1);
1384 	CU_ASSERT(ctrlr.is_failed == false);
1385 	CU_ASSERT(ctrlr.adminq.is_connected == false);
1386 
1387 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1388 	poll_thread_times(0, 2);
1389 	CU_ASSERT(ctrlr.adminq.is_connected == true);
1390 
1391 	poll_thread_times(0, 1);
1392 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
1393 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
1394 
1395 	poll_thread_times(1, 1);
1396 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
1397 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
1398 	CU_ASSERT(nvme_ctrlr->resetting == true);
1399 	CU_ASSERT(curr_trid->is_failed == true);
1400 
1401 	poll_thread_times(0, 2);
1402 	CU_ASSERT(nvme_ctrlr->resetting == true);
1403 	poll_thread_times(1, 1);
1404 	CU_ASSERT(nvme_ctrlr->resetting == true);
1405 	poll_thread_times(0, 1);
1406 	CU_ASSERT(nvme_ctrlr->resetting == false);
1407 	CU_ASSERT(curr_trid->is_failed == false);
1408 
1409 	spdk_put_io_channel(ch2);
1410 
1411 	set_thread(0);
1412 
1413 	spdk_put_io_channel(ch1);
1414 
1415 	poll_threads();
1416 
1417 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1418 	CU_ASSERT(rc == 0);
1419 
1420 	poll_threads();
1421 	spdk_delay_us(1000);
1422 	poll_threads();
1423 
1424 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1425 }
1426 
1427 static void
1428 test_race_between_reset_and_destruct_ctrlr(void)
1429 {
1430 	struct spdk_nvme_transport_id trid = {};
1431 	struct spdk_nvme_ctrlr ctrlr = {};
1432 	struct nvme_ctrlr *nvme_ctrlr;
1433 	struct spdk_io_channel *ch1, *ch2;
1434 	int rc;
1435 
1436 	ut_init_trid(&trid);
1437 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1438 
1439 	set_thread(0);
1440 
1441 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
1442 	CU_ASSERT(rc == 0);
1443 
1444 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1445 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1446 
1447 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1448 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1449 
1450 	set_thread(1);
1451 
1452 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1453 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1454 
1455 	/* Reset starts from thread 1. */
1456 	set_thread(1);
1457 
1458 	rc = bdev_nvme_reset(nvme_ctrlr);
1459 	CU_ASSERT(rc == 0);
1460 	CU_ASSERT(nvme_ctrlr->resetting == true);
1461 
1462 	/* Try destructing the ctrlr while it is being reset; the destruct will be deferred. */
1463 	set_thread(0);
1464 
1465 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1466 	CU_ASSERT(rc == 0);
1467 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1468 	CU_ASSERT(nvme_ctrlr->destruct == true);
1469 	CU_ASSERT(nvme_ctrlr->resetting == true);
1470 
1471 	poll_threads();
1472 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1473 	poll_threads();
1474 
1475 	/* Reset completed but the ctrlr is not destructed yet. */
1476 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1477 	CU_ASSERT(nvme_ctrlr->destruct == true);
1478 	CU_ASSERT(nvme_ctrlr->resetting == false);
1479 
1480 	/* New reset request is rejected. */
1481 	rc = bdev_nvme_reset(nvme_ctrlr);
1482 	CU_ASSERT(rc == -ENXIO);
1483 
1484 	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
1485 	 * However, there are still two channels, so the destruct is not completed yet.
1486 	 */
1487 	poll_threads();
1488 
1489 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
1490 
1491 	set_thread(0);
1492 
1493 	spdk_put_io_channel(ch1);
1494 
1495 	set_thread(1);
1496 
1497 	spdk_put_io_channel(ch2);
1498 
1499 	poll_threads();
1500 	spdk_delay_us(1000);
1501 	poll_threads();
1502 
1503 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1504 }
1505 
1506 static void
1507 test_failover_ctrlr(void)
1508 {
1509 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
1510 	struct spdk_nvme_ctrlr ctrlr = {};
1511 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1512 	struct nvme_path_id *curr_trid, *next_trid;
1513 	struct spdk_io_channel *ch1, *ch2;
1514 	int rc;
1515 
1516 	ut_init_trid(&trid1);
1517 	ut_init_trid2(&trid2);
1518 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1519 
1520 	set_thread(0);
1521 
1522 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1523 	CU_ASSERT(rc == 0);
1524 
1525 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1526 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1527 
1528 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1529 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1530 
1531 	set_thread(1);
1532 
1533 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1534 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1535 
1536 	/* First, test the single trid case. */
1537 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1538 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1539 
1540 	/* Failover starts from thread 1. */
1541 	set_thread(1);
1542 
1543 	/* Case 1: ctrlr is already being destructed. */
1544 	nvme_ctrlr->destruct = true;
1545 
1546 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1547 	CU_ASSERT(rc == -ENXIO);
1548 	CU_ASSERT(curr_trid->is_failed == false);
1549 
1550 	/* Case 2: reset is in progress. */
1551 	nvme_ctrlr->destruct = false;
1552 	nvme_ctrlr->resetting = true;
1553 
1554 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1555 	CU_ASSERT(rc == -EBUSY);
1556 
1557 	/* Case 3: reset completes successfully. */
1558 	nvme_ctrlr->resetting = false;
1559 
1560 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1561 	CU_ASSERT(rc == 0);
1562 
1563 	CU_ASSERT(nvme_ctrlr->resetting == true);
1564 	CU_ASSERT(curr_trid->is_failed == true);
1565 
1566 	poll_threads();
1567 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1568 	poll_threads();
1569 
1570 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1571 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1572 
1573 	CU_ASSERT(nvme_ctrlr->resetting == false);
1574 	CU_ASSERT(curr_trid->is_failed == false);
1575 
1576 	set_thread(0);
1577 
1578 	/* Second, test the case with two trids. */
1579 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1580 	CU_ASSERT(rc == 0);
1581 
1582 	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1583 	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
1584 	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
1585 	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);
1586 
1587 	/* Failover starts from thread 1. */
1588 	set_thread(1);
1589 
1590 	/* Case 4: reset is in progress. */
1591 	nvme_ctrlr->resetting = true;
1592 
1593 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1594 	CU_ASSERT(rc == -EBUSY);
1595 
1596 	/* Case 5: failover completes successfully. */
1597 	nvme_ctrlr->resetting = false;
1598 
1599 	rc = bdev_nvme_failover(nvme_ctrlr, false);
1600 	CU_ASSERT(rc == 0);
1601 
1602 	CU_ASSERT(nvme_ctrlr->resetting == true);
1603 
1604 	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
1605 	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
1606 	CU_ASSERT(next_trid != curr_trid);
1607 	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
1608 	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);
1609 
1610 	poll_threads();
1611 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1612 	poll_threads();
1613 
1614 	CU_ASSERT(nvme_ctrlr->resetting == false);
1615 
1616 	spdk_put_io_channel(ch2);
1617 
1618 	set_thread(0);
1619 
1620 	spdk_put_io_channel(ch1);
1621 
1622 	poll_threads();
1623 
1624 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1625 	CU_ASSERT(rc == 0);
1626 
1627 	poll_threads();
1628 	spdk_delay_us(1000);
1629 	poll_threads();
1630 
1631 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1632 }
1633 
1634 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
1635  *
1636  * An nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
1637  * trid1 was lost, and ctrlr reset failed repeatedly before failover from trid1 to
1638  * trid2 started. While the failed reset was being processed, trid3 was added. trid1
1639  * should have remained active, i.e., the head of the list, until the failover completed.
1640  * However, trid3 was inserted at the head of the list by mistake.
1641  *
1642  * I/O qpairs have a smaller polling period than the admin qpair. When a connection is
1643  * broken, an I/O qpair may detect the error earlier than the admin qpair. An I/O qpair
1644  * error triggers ctrlr reset, and an admin qpair error triggers ctrlr failover. Hence
1645  * ctrlr reset may be executed repeatedly before failover is executed, so this bug is real.
1646  *
1647  * The following test verifies the fix.
1648  */
1649 static void
1650 test_race_between_failover_and_add_secondary_trid(void)
1651 {
1652 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1653 	struct spdk_nvme_ctrlr ctrlr = {};
1654 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1655 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1656 	struct spdk_io_channel *ch1, *ch2;
1657 	int rc;
1658 
1659 	ut_init_trid(&trid1);
1660 	ut_init_trid2(&trid2);
1661 	ut_init_trid3(&trid3);
1662 	TAILQ_INIT(&ctrlr.active_io_qpairs);
1663 
1664 	set_thread(0);
1665 
1666 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1667 	CU_ASSERT(rc == 0);
1668 
1669 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1670 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1671 
1672 	ch1 = spdk_get_io_channel(nvme_ctrlr);
1673 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1674 
1675 	set_thread(1);
1676 
1677 	ch2 = spdk_get_io_channel(nvme_ctrlr);
1678 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1679 
1680 	set_thread(0);
1681 
1682 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1683 	CU_ASSERT(rc == 0);
1684 
1685 	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1686 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1687 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1688 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1689 	path_id2 = TAILQ_NEXT(path_id1, link);
1690 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1691 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1692 
1693 	ctrlr.fail_reset = true;
1694 
1695 	rc = bdev_nvme_reset(nvme_ctrlr);
1696 	CU_ASSERT(rc == 0);
1697 
1698 	poll_threads();
1699 
1700 	CU_ASSERT(path_id1->is_failed == true);
1701 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1702 
1703 	rc = bdev_nvme_reset(nvme_ctrlr);
1704 	CU_ASSERT(rc == 0);
1705 
1706 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1707 	CU_ASSERT(rc == 0);
1708 
1709 	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1710 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1711 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1712 	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1713 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1714 	path_id3 = TAILQ_NEXT(path_id2, link);
1715 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1716 	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1717 
1718 	poll_threads();
1719 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1720 	poll_threads();
1721 
1722 	spdk_put_io_channel(ch1);
1723 
1724 	set_thread(1);
1725 
1726 	spdk_put_io_channel(ch2);
1727 
1728 	poll_threads();
1729 
1730 	set_thread(0);
1731 
1732 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1733 	CU_ASSERT(rc == 0);
1734 
1735 	poll_threads();
1736 	spdk_delay_us(1000);
1737 	poll_threads();
1738 
1739 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1740 }
1741 
1742 static void
1743 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1744 {
1745 	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1746 	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1747 }
1748 
1749 static void
1750 test_pending_reset(void)
1751 {
1752 	struct spdk_nvme_transport_id trid = {};
1753 	struct spdk_nvme_ctrlr *ctrlr;
1754 	struct nvme_ctrlr *nvme_ctrlr = NULL;
1755 	const int STRING_SIZE = 32;
1756 	const char *attached_names[STRING_SIZE];
1757 	struct nvme_bdev *bdev;
1758 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1759 	struct spdk_io_channel *ch1, *ch2;
1760 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1761 	struct nvme_io_path *io_path1, *io_path2;
1762 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1763 	int rc;
1764 
1765 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1766 	ut_init_trid(&trid);
1767 
1768 	set_thread(0);
1769 
1770 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1771 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1772 
1773 	g_ut_attach_ctrlr_status = 0;
1774 	g_ut_attach_bdev_count = 1;
1775 
1776 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1777 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1778 	CU_ASSERT(rc == 0);
1779 
1780 	spdk_delay_us(1000);
1781 	poll_threads();
1782 
1783 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1784 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1785 
1786 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1787 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1788 
1789 	ch1 = spdk_get_io_channel(bdev);
1790 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1791 
1792 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1793 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1794 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1795 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1796 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1797 
1798 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1799 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1800 
1801 	set_thread(1);
1802 
1803 	ch2 = spdk_get_io_channel(bdev);
1804 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1805 
1806 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1807 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1808 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1809 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1810 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1811 
1812 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1813 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1814 
1815 	/* The first reset request is submitted on thread 1, and the second reset request
1816 	 * is submitted on thread 0 while processing the first request.
1817 	 */
1818 	bdev_nvme_submit_request(ch2, first_bdev_io);
1819 	CU_ASSERT(nvme_ctrlr->resetting == true);
1820 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1821 
1822 	set_thread(0);
1823 
1824 	bdev_nvme_submit_request(ch1, second_bdev_io);
1825 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1826 
1827 	poll_threads();
1828 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1829 	poll_threads();
1830 
1831 	CU_ASSERT(nvme_ctrlr->resetting == false);
1832 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1833 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1834 
1835 	/* The first reset request is submitted on thread 1, and the second reset request
1836 	 * is submitted on thread 0 while the first request is still being processed.
1837 	 *
1838 	 * The difference from the above scenario is that the controller fails to reset
1839 	 * while processing the first request. Hence both reset requests should fail.
1840 	 */
1841 	set_thread(1);
1842 
1843 	bdev_nvme_submit_request(ch2, first_bdev_io);
1844 	CU_ASSERT(nvme_ctrlr->resetting == true);
1845 	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1846 
1847 	set_thread(0);
1848 
1849 	bdev_nvme_submit_request(ch1, second_bdev_io);
1850 	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1851 
1852 	ctrlr->fail_reset = true;
1853 
1854 	poll_threads();
1855 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1856 	poll_threads();
1857 
1858 	CU_ASSERT(nvme_ctrlr->resetting == false);
1859 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1860 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1861 
1862 	spdk_put_io_channel(ch1);
1863 
1864 	set_thread(1);
1865 
1866 	spdk_put_io_channel(ch2);
1867 
1868 	poll_threads();
1869 
1870 	set_thread(0);
1871 
1872 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1873 	CU_ASSERT(rc == 0);
1874 
1875 	poll_threads();
1876 	spdk_delay_us(1000);
1877 	poll_threads();
1878 
1879 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1880 
1881 	free(first_bdev_io);
1882 	free(second_bdev_io);
1883 }
1884 
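/* Exercise the attach paths of bdev_nvme_create(): a failed controller creates no
 * nvme_ctrlr, a controller without namespaces creates only an nvme_ctrlr, a controller
 * with one namespace creates an nvme_ctrlr and an nvme_bdev, and a bdev registration
 * failure results in an nvme_ctrlr without any nvme_bdev.
 */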
1885 static void
1886 test_attach_ctrlr(void)
1887 {
1888 	struct spdk_nvme_transport_id trid = {};
1889 	struct spdk_nvme_ctrlr *ctrlr;
1890 	struct nvme_ctrlr *nvme_ctrlr;
1891 	const int STRING_SIZE = 32;
1892 	const char *attached_names[STRING_SIZE];
1893 	struct nvme_bdev *nbdev;
1894 	int rc;
1895 
1896 	set_thread(0);
1897 
1898 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1899 	ut_init_trid(&trid);
1900 
1901 	/* If the ctrlr is failed, no nvme_ctrlr is created. The failed ctrlr is removed
1902 	 * by probe polling.
1903 	 */
1904 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1905 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1906 
1907 	ctrlr->is_failed = true;
1908 	g_ut_attach_ctrlr_status = -EIO;
1909 	g_ut_attach_bdev_count = 0;
1910 
1911 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1912 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1913 	CU_ASSERT(rc == 0);
1914 
1915 	spdk_delay_us(1000);
1916 	poll_threads();
1917 
1918 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1919 
1920 	/* If the ctrlr has no namespace, one nvme_ctrlr with no namespace is created. */
1921 	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1922 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1923 
1924 	g_ut_attach_ctrlr_status = 0;
1925 
1926 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1927 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1928 	CU_ASSERT(rc == 0);
1929 
1930 	spdk_delay_us(1000);
1931 	poll_threads();
1932 
1933 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1934 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1935 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1936 
1937 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1938 	CU_ASSERT(rc == 0);
1939 
1940 	poll_threads();
1941 	spdk_delay_us(1000);
1942 	poll_threads();
1943 
1944 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1945 
1946 	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
1947 	 * one nvme_bdev is created.
1948 	 */
1949 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1950 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1951 
1952 	g_ut_attach_bdev_count = 1;
1953 
1954 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1955 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1956 	CU_ASSERT(rc == 0);
1957 
1958 	spdk_delay_us(1000);
1959 	poll_threads();
1960 
1961 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1962 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1963 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1964 
1965 	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
1966 	attached_names[0] = NULL;
1967 
1968 	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1969 	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
1970 	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
1971 
1972 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1973 	CU_ASSERT(rc == 0);
1974 
1975 	poll_threads();
1976 	spdk_delay_us(1000);
1977 	poll_threads();
1978 
1979 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1980 
1981 	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespace is
1982 	 * created because registering the nvme_bdev failed.
1983 	 */
1984 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1985 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1986 
1987 	g_ut_register_bdev_status = -EINVAL;
1988 	g_ut_attach_bdev_count = 0;
1989 
1990 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1991 			      attach_ctrlr_done, NULL, NULL, NULL, false);
1992 	CU_ASSERT(rc == 0);
1993 
1994 	spdk_delay_us(1000);
1995 	poll_threads();
1996 
1997 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1998 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1999 	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2000 
2001 	CU_ASSERT(attached_names[0] == NULL);
2002 
2003 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2004 	CU_ASSERT(rc == 0);
2005 
2006 	poll_threads();
2007 	spdk_delay_us(1000);
2008 	poll_threads();
2009 
2010 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2011 
2012 	g_ut_register_bdev_status = 0;
2013 }
2014 
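/* Verify the AER callback: a namespace attribute changed notice repopulates the
 * namespace list and resizes existing bdevs, and an ANA change notice updates the
 * per-namespace ANA states.
 */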
2015 static void
2016 test_aer_cb(void)
2017 {
2018 	struct spdk_nvme_transport_id trid = {};
2019 	struct spdk_nvme_ctrlr *ctrlr;
2020 	struct nvme_ctrlr *nvme_ctrlr;
2021 	struct nvme_bdev *bdev;
2022 	const int STRING_SIZE = 32;
2023 	const char *attached_names[STRING_SIZE];
2024 	union spdk_nvme_async_event_completion event = {};
2025 	struct spdk_nvme_cpl cpl = {};
2026 	int rc;
2027 
2028 	set_thread(0);
2029 
2030 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2031 	ut_init_trid(&trid);
2032 
2033 	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd, and 4th
2034 	 * namespaces are populated.
2035 	 */
2036 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2037 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2038 
2039 	ctrlr->ns[0].is_active = false;
2040 
2041 	g_ut_attach_ctrlr_status = 0;
2042 	g_ut_attach_bdev_count = 3;
2043 
2044 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2045 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2046 	CU_ASSERT(rc == 0);
2047 
2048 	spdk_delay_us(1000);
2049 	poll_threads();
2050 
2051 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2052 	poll_threads();
2053 
2054 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2055 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2056 
2057 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2058 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2059 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2060 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2061 
2062 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2063 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2064 	CU_ASSERT(bdev->disk.blockcnt == 1024);
2065 
2066 	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
2067 	 * change the size of the 4th namespace.
2068 	 */
2069 	ctrlr->ns[0].is_active = true;
2070 	ctrlr->ns[2].is_active = false;
2071 	ctrlr->nsdata[3].nsze = 2048;
2072 
2073 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2074 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2075 	cpl.cdw0 = event.raw;
2076 
2077 	aer_cb(nvme_ctrlr, &cpl);
2078 
2079 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2080 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2081 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2082 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2083 	CU_ASSERT(bdev->disk.blockcnt == 2048);
2084 
2085 	/* Change ANA state of active namespaces. */
2086 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2087 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2088 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2089 
2090 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2091 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2092 	cpl.cdw0 = event.raw;
2093 
2094 	aer_cb(nvme_ctrlr, &cpl);
2095 
2096 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2097 	poll_threads();
2098 
2099 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2100 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2101 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2102 
2103 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2104 	CU_ASSERT(rc == 0);
2105 
2106 	poll_threads();
2107 	spdk_delay_us(1000);
2108 	poll_threads();
2109 
2110 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2111 }
2112 
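/* Helper that submits a bdev_io of the given type and verifies that it stays outstanding
 * on the I/O qpair until poll_threads() completes it successfully.
 */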
2113 static void
2114 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2115 			enum spdk_bdev_io_type io_type)
2116 {
2117 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2118 	struct nvme_io_path *io_path;
2119 	struct spdk_nvme_qpair *qpair;
2120 
2121 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2122 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2123 	qpair = io_path->qpair->qpair;
2124 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2125 
2126 	bdev_io->type = io_type;
2127 	bdev_io->internal.in_submit_request = true;
2128 
2129 	bdev_nvme_submit_request(ch, bdev_io);
2130 
2131 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2132 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2133 
2134 	poll_threads();
2135 
2136 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2137 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2138 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2139 }
2140 
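/* Helper for I/O types that are completed immediately without leaving a request
 * outstanding on the I/O qpair (e.g. flush).
 */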
2141 static void
2142 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2143 		   enum spdk_bdev_io_type io_type)
2144 {
2145 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2146 	struct nvme_io_path *io_path;
2147 	struct spdk_nvme_qpair *qpair;
2148 
2149 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2150 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2151 	qpair = io_path->qpair->qpair;
2152 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2153 
2154 	bdev_io->type = io_type;
2155 	bdev_io->internal.in_submit_request = true;
2156 
2157 	bdev_nvme_submit_request(ch, bdev_io);
2158 
2159 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2160 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2161 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2162 }
2163 
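/* Helper for fused compare-and-write: both the compare and the write commands are
 * submitted to the I/O qpair, the compare command comes first, and the bdev_io
 * completes successfully once both are processed.
 */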
2164 static void
2165 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2166 {
2167 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2168 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2169 	struct ut_nvme_req *req;
2170 	struct nvme_io_path *io_path;
2171 	struct spdk_nvme_qpair *qpair;
2172 
2173 	io_path = bdev_nvme_find_io_path(nbdev_ch);
2174 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2175 	qpair = io_path->qpair->qpair;
2176 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2177 
2178 	/* Compare-and-write is the only fused operation supported for now. */
2179 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2180 	bdev_io->internal.in_submit_request = true;
2181 
2182 	bdev_nvme_submit_request(ch, bdev_io);
2183 
2184 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2185 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2186 	CU_ASSERT(bio->first_fused_submitted == true);
2187 
2188 	/* First outstanding request is compare operation. */
2189 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2190 	SPDK_CU_ASSERT_FATAL(req != NULL);
2191 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2192 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2193 
2194 	poll_threads();
2195 
2196 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2197 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2198 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2199 }
2200 
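/* Helper that submits an NVMe admin passthrough command. The command is processed on the
 * thread that polls the admin qpair and the bdev_io completion is delivered back on the
 * submitting thread.
 */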
2201 static void
2202 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2203 			 struct spdk_nvme_ctrlr *ctrlr)
2204 {
2205 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2206 	bdev_io->internal.in_submit_request = true;
2207 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2208 
2209 	bdev_nvme_submit_request(ch, bdev_io);
2210 
2211 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2212 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2213 
2214 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2215 	poll_thread_times(1, 1);
2216 
2217 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2218 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2219 
2220 	poll_thread_times(0, 1);
2221 
2222 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2223 }
2224 
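/* Run the submit helpers above for read, write, compare, unmap, flush, compare-and-write,
 * and admin passthrough, and verify that the readv/writev ext APIs are used when
 * bdev_io ext_opts is set.
 */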
2225 static void
2226 test_submit_nvme_cmd(void)
2227 {
2228 	struct spdk_nvme_transport_id trid = {};
2229 	struct spdk_nvme_ctrlr *ctrlr;
2230 	struct nvme_ctrlr *nvme_ctrlr;
2231 	const int STRING_SIZE = 32;
2232 	const char *attached_names[STRING_SIZE];
2233 	struct nvme_bdev *bdev;
2234 	struct spdk_bdev_io *bdev_io;
2235 	struct spdk_io_channel *ch;
2236 	struct spdk_bdev_ext_io_opts ext_io_opts = {};
2237 	int rc;
2238 
2239 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2240 	ut_init_trid(&trid);
2241 
2242 	set_thread(1);
2243 
2244 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2245 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2246 
2247 	g_ut_attach_ctrlr_status = 0;
2248 	g_ut_attach_bdev_count = 1;
2249 
2250 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2251 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2252 	CU_ASSERT(rc == 0);
2253 
2254 	spdk_delay_us(1000);
2255 	poll_threads();
2256 
2257 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2258 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2259 
2260 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2261 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2262 
2263 	set_thread(0);
2264 
2265 	ch = spdk_get_io_channel(bdev);
2266 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2267 
2268 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2269 
2270 	bdev_io->u.bdev.iovs = NULL;
2271 
2272 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2273 
2274 	ut_bdev_io_set_buf(bdev_io);
2275 
2276 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2277 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2278 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2279 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2280 
2281 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2282 
2283 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2284 
2285 	/* Verify that the ext NVMe APIs are called if bdev_io ext_opts is set. */
2286 	bdev_io->u.bdev.ext_opts = &ext_io_opts;
2287 	g_ut_readv_ext_called = false;
2288 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2289 	CU_ASSERT(g_ut_readv_ext_called == true);
2290 	g_ut_readv_ext_called = false;
2291 
2292 	g_ut_writev_ext_called = false;
2293 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2294 	CU_ASSERT(g_ut_writev_ext_called == true);
2295 	g_ut_writev_ext_called = false;
2296 	bdev_io->u.bdev.ext_opts = NULL;
2297 
2298 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2299 
2300 	free(bdev_io);
2301 
2302 	spdk_put_io_channel(ch);
2303 
2304 	poll_threads();
2305 
2306 	set_thread(1);
2307 
2308 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2309 	CU_ASSERT(rc == 0);
2310 
2311 	poll_threads();
2312 	spdk_delay_us(1000);
2313 	poll_threads();
2314 
2315 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2316 }
2317 
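/* Verify adding and removing alternate trids for a single nvme_ctrlr: removing an
 * unregistered trid fails with -ENXIO, removing an unused alternate trid simply drops it,
 * removing the active trid triggers a reset and failover to an alternate trid, and
 * removing the last trid (or passing g_any_path) removes the nvme_ctrlr itself.
 */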
2318 static void
2319 test_add_remove_trid(void)
2320 {
2321 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2322 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2323 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2324 	const int STRING_SIZE = 32;
2325 	const char *attached_names[STRING_SIZE];
2326 	struct nvme_path_id *ctrid;
2327 	int rc;
2328 
2329 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2330 	ut_init_trid(&path1.trid);
2331 	ut_init_trid2(&path2.trid);
2332 	ut_init_trid3(&path3.trid);
2333 
2334 	set_thread(0);
2335 
2336 	g_ut_attach_ctrlr_status = 0;
2337 	g_ut_attach_bdev_count = 0;
2338 
2339 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2340 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2341 
2342 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2343 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2344 	CU_ASSERT(rc == 0);
2345 
2346 	spdk_delay_us(1000);
2347 	poll_threads();
2348 
2349 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2350 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2351 
2352 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2353 
2354 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2355 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2356 
2357 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2358 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2359 	CU_ASSERT(rc == 0);
2360 
2361 	spdk_delay_us(1000);
2362 	poll_threads();
2363 
2364 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2365 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2366 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2367 			break;
2368 		}
2369 	}
2370 	CU_ASSERT(ctrid != NULL);
2371 
2372 	/* trid3 is not in the registered list. */
2373 	rc = bdev_nvme_delete("nvme0", &path3);
2374 	CU_ASSERT(rc == -ENXIO);
2375 
2376 	/* trid2 is not in use and is simply removed. */
2377 	rc = bdev_nvme_delete("nvme0", &path2);
2378 	CU_ASSERT(rc == 0);
2379 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2380 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2381 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2382 	}
2383 
2384 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2385 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2386 
2387 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2388 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2389 	CU_ASSERT(rc == 0);
2390 
2391 	spdk_delay_us(1000);
2392 	poll_threads();
2393 
2394 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2395 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2396 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2397 			break;
2398 		}
2399 	}
2400 	CU_ASSERT(ctrid != NULL);
2401 
2402 	/* path1 is currently used and path3 is an alternative path.
2403 	 * If we remove path1, the active path is changed to path3.
2404 	 */
2405 	rc = bdev_nvme_delete("nvme0", &path1);
2406 	CU_ASSERT(rc == 0);
2407 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2408 	CU_ASSERT(nvme_ctrlr->resetting == true);
2409 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2410 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2411 	}
2412 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2413 
2414 	poll_threads();
2415 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2416 	poll_threads();
2417 
2418 	CU_ASSERT(nvme_ctrlr->resetting == false);
2419 
2420 	/* path3 is the current and only path. If we remove path3, the corresponding
2421 	 * nvme_ctrlr is removed.
2422 	 */
2423 	rc = bdev_nvme_delete("nvme0", &path3);
2424 	CU_ASSERT(rc == 0);
2425 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2426 
2427 	poll_threads();
2428 	spdk_delay_us(1000);
2429 	poll_threads();
2430 
2431 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2432 
2433 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2434 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2435 
2436 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2437 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2438 	CU_ASSERT(rc == 0);
2439 
2440 	spdk_delay_us(1000);
2441 	poll_threads();
2442 
2443 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2444 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2445 
2446 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2447 
2448 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2449 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2450 
2451 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2452 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2453 	CU_ASSERT(rc == 0);
2454 
2455 	spdk_delay_us(1000);
2456 	poll_threads();
2457 
2458 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2459 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2460 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2461 			break;
2462 		}
2463 	}
2464 	CU_ASSERT(ctrid != NULL);
2465 
2466 	/* If trid is not specified, nvme_ctrlr itself is removed. */
2467 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2468 	CU_ASSERT(rc == 0);
2469 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2470 
2471 	poll_threads();
2472 	spdk_delay_us(1000);
2473 	poll_threads();
2474 
2475 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2476 }
2477 
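/* Verify abort handling: aborting an already completed I/O or admin request fails,
 * aborting an outstanding write, fused, or admin request succeeds and completes the
 * target with SPDK_BDEV_IO_STATUS_ABORTED, and aborting an I/O queued for retry while
 * the nvme_ctrlr is resetting succeeds immediately.
 */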
2478 static void
2479 test_abort(void)
2480 {
2481 	struct spdk_nvme_transport_id trid = {};
2482 	struct nvme_ctrlr_opts opts = {};
2483 	struct spdk_nvme_ctrlr *ctrlr;
2484 	struct nvme_ctrlr *nvme_ctrlr;
2485 	const int STRING_SIZE = 32;
2486 	const char *attached_names[STRING_SIZE];
2487 	struct nvme_bdev *bdev;
2488 	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2489 	struct spdk_io_channel *ch1, *ch2;
2490 	struct nvme_bdev_channel *nbdev_ch1;
2491 	struct nvme_io_path *io_path1;
2492 	struct nvme_qpair *nvme_qpair1;
2493 	int rc;
2494 
2495 	/* Create a ctrlr on thread 1 and submit the I/O and admin requests to be aborted on
2496 	 * thread 0. Abort requests for I/O are submitted on thread 0 and abort requests for
2497 	 * admin commands are submitted on thread 1. Both should succeed.
2498 	 */
2499 
2500 	ut_init_trid(&trid);
2501 
2502 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2503 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2504 
2505 	g_ut_attach_ctrlr_status = 0;
2506 	g_ut_attach_bdev_count = 1;
2507 
2508 	set_thread(1);
2509 
2510 	opts.ctrlr_loss_timeout_sec = -1;
2511 	opts.reconnect_delay_sec = 1;
2512 
2513 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2514 			      attach_ctrlr_done, NULL, NULL, &opts, false);
2515 	CU_ASSERT(rc == 0);
2516 
2517 	spdk_delay_us(1000);
2518 	poll_threads();
2519 
2520 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2521 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2522 
2523 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2524 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2525 
2526 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2527 	ut_bdev_io_set_buf(write_io);
2528 
2529 	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2530 	ut_bdev_io_set_buf(fuse_io);
2531 
2532 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2533 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2534 
2535 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2536 
2537 	set_thread(0);
2538 
2539 	ch1 = spdk_get_io_channel(bdev);
2540 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2541 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2542 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2543 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2544 	nvme_qpair1 = io_path1->qpair;
2545 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2546 
2547 	set_thread(1);
2548 
2549 	ch2 = spdk_get_io_channel(bdev);
2550 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2551 
2552 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2553 	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2554 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2555 
2556 	/* Aborting the already completed request should fail. */
2557 	write_io->internal.in_submit_request = true;
2558 	bdev_nvme_submit_request(ch1, write_io);
2559 	poll_threads();
2560 
2561 	CU_ASSERT(write_io->internal.in_submit_request == false);
2562 
2563 	abort_io->u.abort.bio_to_abort = write_io;
2564 	abort_io->internal.in_submit_request = true;
2565 
2566 	bdev_nvme_submit_request(ch1, abort_io);
2567 
2568 	poll_threads();
2569 
2570 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2571 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2572 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2573 
2574 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2575 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2576 
2577 	admin_io->internal.in_submit_request = true;
2578 	bdev_nvme_submit_request(ch1, admin_io);
2579 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2580 	poll_threads();
2581 
2582 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2583 
2584 	abort_io->u.abort.bio_to_abort = admin_io;
2585 	abort_io->internal.in_submit_request = true;
2586 
2587 	bdev_nvme_submit_request(ch2, abort_io);
2588 
2589 	poll_threads();
2590 
2591 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2592 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2593 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2594 
2595 	/* Aborting the write request should succeed. */
2596 	write_io->internal.in_submit_request = true;
2597 	bdev_nvme_submit_request(ch1, write_io);
2598 
2599 	CU_ASSERT(write_io->internal.in_submit_request == true);
2600 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2601 
2602 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2603 	abort_io->u.abort.bio_to_abort = write_io;
2604 	abort_io->internal.in_submit_request = true;
2605 
2606 	bdev_nvme_submit_request(ch1, abort_io);
2607 
2608 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2609 	poll_threads();
2610 
2611 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2612 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2613 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2614 	CU_ASSERT(write_io->internal.in_submit_request == false);
2615 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2616 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2617 
2618 	/* Aborting the fuse request should succeed. */
2619 	fuse_io->internal.in_submit_request = true;
2620 	bdev_nvme_submit_request(ch1, fuse_io);
2621 
2622 	CU_ASSERT(fuse_io->internal.in_submit_request == true);
2623 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2624 
2625 	abort_io->u.abort.bio_to_abort = fuse_io;
2626 	abort_io->internal.in_submit_request = true;
2627 
2628 	bdev_nvme_submit_request(ch1, abort_io);
2629 
2630 	spdk_delay_us(10000);
2631 	poll_threads();
2632 
2633 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2634 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2635 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2636 	CU_ASSERT(fuse_io->internal.in_submit_request == false);
2637 	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2638 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2639 
2640 	/* Aborting the admin request should succeed. */
2641 	admin_io->internal.in_submit_request = true;
2642 	bdev_nvme_submit_request(ch1, admin_io);
2643 
2644 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2645 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2646 
2647 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2648 	abort_io->u.abort.bio_to_abort = admin_io;
2649 	abort_io->internal.in_submit_request = true;
2650 
2651 	bdev_nvme_submit_request(ch2, abort_io);
2652 
2653 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2654 	poll_threads();
2655 
2656 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2657 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2658 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2659 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2660 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2661 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2662 
2663 	set_thread(0);
2664 
2665 	/* If qpair is disconnected, it is freed and then reconnected via resetting
2666 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
2667 	 * while resetting the nvme_ctrlr.
2668 	 */
2669 	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2670 
2671 	poll_thread_times(0, 3);
2672 
2673 	CU_ASSERT(nvme_qpair1->qpair == NULL);
2674 	CU_ASSERT(nvme_ctrlr->resetting == true);
2675 
2676 	write_io->internal.in_submit_request = true;
2677 
2678 	bdev_nvme_submit_request(ch1, write_io);
2679 
2680 	CU_ASSERT(write_io->internal.in_submit_request == true);
2681 	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2682 
2683 	/* Aborting the queued write request should succeed immediately. */
2684 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2685 	abort_io->u.abort.bio_to_abort = write_io;
2686 	abort_io->internal.in_submit_request = true;
2687 
2688 	bdev_nvme_submit_request(ch1, abort_io);
2689 
2690 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2691 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2692 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2693 	CU_ASSERT(write_io->internal.in_submit_request == false);
2694 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2695 
2696 	poll_threads();
2697 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2698 	poll_threads();
2699 
2700 	spdk_put_io_channel(ch1);
2701 
2702 	set_thread(1);
2703 
2704 	spdk_put_io_channel(ch2);
2705 
2706 	poll_threads();
2707 
2708 	free(write_io);
2709 	free(fuse_io);
2710 	free(admin_io);
2711 	free(abort_io);
2712 
2713 	set_thread(1);
2714 
2715 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2716 	CU_ASSERT(rc == 0);
2717 
2718 	poll_threads();
2719 	spdk_delay_us(1000);
2720 	poll_threads();
2721 
2722 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2723 }
2724 
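/* Verify that bdev_nvme_get_io_qpair() returns the spdk_nvme_qpair owned by the
 * nvme_ctrlr_channel of the given I/O channel.
 */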
2725 static void
2726 test_get_io_qpair(void)
2727 {
2728 	struct spdk_nvme_transport_id trid = {};
2729 	struct spdk_nvme_ctrlr ctrlr = {};
2730 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2731 	struct spdk_io_channel *ch;
2732 	struct nvme_ctrlr_channel *ctrlr_ch;
2733 	struct spdk_nvme_qpair *qpair;
2734 	int rc;
2735 
2736 	ut_init_trid(&trid);
2737 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2738 
2739 	set_thread(0);
2740 
2741 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2742 	CU_ASSERT(rc == 0);
2743 
2744 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2745 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2746 
2747 	ch = spdk_get_io_channel(nvme_ctrlr);
2748 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2749 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2750 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2751 
2752 	qpair = bdev_nvme_get_io_qpair(ch);
2753 	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2754 
2755 	spdk_put_io_channel(ch);
2756 
2757 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2758 	CU_ASSERT(rc == 0);
2759 
2760 	poll_threads();
2761 	spdk_delay_us(1000);
2762 	poll_threads();
2763 
2764 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2765 }
2766 
2767 /* Test the scenario in which the bdev subsystem starts shutting down while NVMe
2768  * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first and
2769  * it eventually calls bdev_nvme_destruct(), so this test calls bdev_nvme_destruct()
2770  * directly to guard against regressions.
2771  */
2772 static void
2773 test_bdev_unregister(void)
2774 {
2775 	struct spdk_nvme_transport_id trid = {};
2776 	struct spdk_nvme_ctrlr *ctrlr;
2777 	struct nvme_ctrlr *nvme_ctrlr;
2778 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2779 	const int STRING_SIZE = 32;
2780 	const char *attached_names[STRING_SIZE];
2781 	struct nvme_bdev *bdev1, *bdev2;
2782 	int rc;
2783 
2784 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2785 	ut_init_trid(&trid);
2786 
2787 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2788 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2789 
2790 	g_ut_attach_ctrlr_status = 0;
2791 	g_ut_attach_bdev_count = 2;
2792 
2793 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2794 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2795 	CU_ASSERT(rc == 0);
2796 
2797 	spdk_delay_us(1000);
2798 	poll_threads();
2799 
2800 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2801 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2802 
2803 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2804 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2805 
2806 	bdev1 = nvme_ns1->bdev;
2807 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2808 
2809 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2810 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2811 
2812 	bdev2 = nvme_ns2->bdev;
2813 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2814 
2815 	bdev_nvme_destruct(&bdev1->disk);
2816 	bdev_nvme_destruct(&bdev2->disk);
2817 
2818 	poll_threads();
2819 
2820 	CU_ASSERT(nvme_ns1->bdev == NULL);
2821 	CU_ASSERT(nvme_ns2->bdev == NULL);
2822 
2823 	nvme_ctrlr->destruct = true;
2824 	_nvme_ctrlr_destruct(nvme_ctrlr);
2825 
2826 	poll_threads();
2827 	spdk_delay_us(1000);
2828 	poll_threads();
2829 
2830 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2831 }
2832 
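/* Verify the namespace matching rules of bdev_nvme_compare_ns() for EUI64, NGUID,
 * UUID, and CSI.
 */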
2833 static void
2834 test_compare_ns(void)
2835 {
2836 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2837 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2838 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2839 	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
2840 	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
2841 
2842 	/* No IDs are defined. */
2843 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2844 
2845 	/* Only EUI64 are defined and not matched. */
2846 	nsdata1.eui64 = 0xABCDEF0123456789;
2847 	nsdata2.eui64 = 0xBBCDEF0123456789;
2848 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2849 
2850 	/* Only EUI64 are defined and matched. */
2851 	nsdata2.eui64 = 0xABCDEF0123456789;
2852 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2853 
2854 	/* Only NGUID are defined and not matched. */
2855 	nsdata1.eui64 = 0x0;
2856 	nsdata2.eui64 = 0x0;
2857 	nsdata1.nguid[0] = 0x12;
2858 	nsdata2.nguid[0] = 0x10;
2859 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2860 
2861 	/* Only NGUID are defined and matched. */
2862 	nsdata2.nguid[0] = 0x12;
2863 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2864 
2865 	/* Only UUID are defined and not matched. */
2866 	nsdata1.nguid[0] = 0x0;
2867 	nsdata2.nguid[0] = 0x0;
2868 	ns1.uuid = &uuid1;
2869 	ns2.uuid = &uuid2;
2870 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2871 
2872 	/* Only one UUID is defined. */
2873 	ns1.uuid = NULL;
2874 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2875 
2876 	/* Only UUID are defined and matched. */
2877 	ns1.uuid = &uuid2;
2878 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2879 
2880 	/* All EUI64, NGUID, and UUID are defined and matched. */
2881 	nsdata1.eui64 = 0x123456789ABCDEF;
2882 	nsdata2.eui64 = 0x123456789ABCDEF;
2883 	nsdata1.nguid[15] = 0x34;
2884 	nsdata2.nguid[15] = 0x34;
2885 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2886 
2887 	/* CSI are not matched. */
2888 	ns1.csi = SPDK_NVME_CSI_ZNS;
2889 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2890 }
2891 
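/* Verify that the ANA states read from the ANA log page at attach time are applied to
 * the corresponding namespaces and that an nvme_bdev is created for each of them.
 */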
2892 static void
2893 test_init_ana_log_page(void)
2894 {
2895 	struct spdk_nvme_transport_id trid = {};
2896 	struct spdk_nvme_ctrlr *ctrlr;
2897 	struct nvme_ctrlr *nvme_ctrlr;
2898 	const int STRING_SIZE = 32;
2899 	const char *attached_names[STRING_SIZE];
2900 	int rc;
2901 
2902 	set_thread(0);
2903 
2904 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2905 	ut_init_trid(&trid);
2906 
2907 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
2908 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2909 
2910 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2911 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2912 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2913 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
2914 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2915 
2916 	g_ut_attach_ctrlr_status = 0;
2917 	g_ut_attach_bdev_count = 5;
2918 
2919 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2920 			      attach_ctrlr_done, NULL, NULL, NULL, false);
2921 	CU_ASSERT(rc == 0);
2922 
2923 	spdk_delay_us(1000);
2924 	poll_threads();
2925 
2926 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2927 	poll_threads();
2928 
2929 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2930 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2931 
2932 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2933 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2934 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2935 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2936 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
2937 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
2938 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2939 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2940 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
2941 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2942 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
2943 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
2944 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
2945 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
2946 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
2947 
2948 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2949 	CU_ASSERT(rc == 0);
2950 
2951 	poll_threads();
2952 	spdk_delay_us(1000);
2953 	poll_threads();
2954 
2955 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2956 }
2957 
2958 static void
2959 init_accel(void)
2960 {
2961 	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
2962 				sizeof(int), "accel_p");
2963 }
2964 
2965 static void
2966 fini_accel(void)
2967 {
2968 	spdk_io_device_unregister(g_accel_p, NULL);
2969 }
2970 
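/* Verify that bdev_nvme_get_memory_domains() returns the number of memory domains
 * reported by the underlying controller.
 */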
2971 static void
2972 test_get_memory_domains(void)
2973 {
2974 	struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
2975 	struct nvme_ns ns = { .ctrlr = &ctrlr };
2976 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
2977 	struct spdk_memory_domain *domains[2] = {};
2978 	int rc = 0;
2979 
2980 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns, tailq);
2981 
2982 	/* nvme controller doesn't have memory domains */
2983 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0);
2984 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2985 	CU_ASSERT(rc == 0);
2986 
2987 	/* nvme controller has a memory domain */
2988 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 1);
2989 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2990 	CU_ASSERT(rc == 1);
2991 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain);
2992 }
2993 
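/* Verify that a disconnected qpair is freed and then reconnected by resetting the
 * corresponding nvme_ctrlr, and that the qpairs stay freed if the reset fails.
 */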
2994 static void
2995 test_reconnect_qpair(void)
2996 {
2997 	struct spdk_nvme_transport_id trid = {};
2998 	struct spdk_nvme_ctrlr *ctrlr;
2999 	struct nvme_ctrlr *nvme_ctrlr;
3000 	const int STRING_SIZE = 32;
3001 	const char *attached_names[STRING_SIZE];
3002 	struct nvme_bdev *bdev;
3003 	struct spdk_io_channel *ch1, *ch2;
3004 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3005 	struct nvme_io_path *io_path1, *io_path2;
3006 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3007 	int rc;
3008 
3009 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3010 	ut_init_trid(&trid);
3011 
3012 	set_thread(0);
3013 
3014 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3015 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3016 
3017 	g_ut_attach_ctrlr_status = 0;
3018 	g_ut_attach_bdev_count = 1;
3019 
3020 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3021 			      attach_ctrlr_done, NULL, NULL, NULL, false);
3022 	CU_ASSERT(rc == 0);
3023 
3024 	spdk_delay_us(1000);
3025 	poll_threads();
3026 
3027 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3028 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3029 
3030 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3031 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3032 
3033 	ch1 = spdk_get_io_channel(bdev);
3034 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3035 
3036 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3037 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3038 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3039 	nvme_qpair1 = io_path1->qpair;
3040 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3041 
3042 	set_thread(1);
3043 
3044 	ch2 = spdk_get_io_channel(bdev);
3045 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3046 
3047 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3048 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3049 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3050 	nvme_qpair2 = io_path2->qpair;
3051 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3052 
3053 	/* If a qpair is disconnected, it is freed and then reconnected via
3054 	 * resetting the corresponding nvme_ctrlr.
3055 	 */
3056 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3057 	ctrlr->is_failed = true;
3058 
3059 	poll_thread_times(1, 3);
3060 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3061 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3062 	CU_ASSERT(nvme_ctrlr->resetting == true);
3063 
3064 	poll_thread_times(0, 3);
3065 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3066 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3067 	CU_ASSERT(ctrlr->is_failed == true);
3068 
3069 	poll_thread_times(1, 2);
3070 	poll_thread_times(0, 1);
3071 	CU_ASSERT(ctrlr->is_failed == false);
3072 	CU_ASSERT(ctrlr->adminq.is_connected == false);
3073 
3074 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3075 	poll_thread_times(0, 2);
3076 	CU_ASSERT(ctrlr->adminq.is_connected == true);
3077 
3078 	poll_thread_times(0, 1);
3079 	poll_thread_times(1, 1);
3080 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3081 	CU_ASSERT(nvme_qpair2->qpair != NULL);
3082 	CU_ASSERT(nvme_ctrlr->resetting == true);
3083 
3084 	poll_thread_times(0, 2);
3085 	poll_thread_times(1, 1);
3086 	poll_thread_times(0, 1);
3087 	CU_ASSERT(nvme_ctrlr->resetting == false);
3088 
3089 	poll_threads();
3090 
3091 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3092 	 * fails, the qpair is just freed.
3093 	 */
3094 	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3095 	ctrlr->is_failed = true;
3096 	ctrlr->fail_reset = true;
3097 
3098 	poll_thread_times(1, 3);
3099 	CU_ASSERT(nvme_qpair1->qpair != NULL);
3100 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3101 	CU_ASSERT(nvme_ctrlr->resetting == true);
3102 
3103 	poll_thread_times(0, 3);
3104 	poll_thread_times(1, 1);
3105 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3106 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3107 	CU_ASSERT(ctrlr->is_failed == true);
3108 
3109 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3110 	poll_thread_times(0, 3);
3111 	poll_thread_times(1, 1);
3112 	poll_thread_times(0, 1);
3113 	CU_ASSERT(ctrlr->is_failed == true);
3114 	CU_ASSERT(nvme_ctrlr->resetting == false);
3115 	CU_ASSERT(nvme_qpair1->qpair == NULL);
3116 	CU_ASSERT(nvme_qpair2->qpair == NULL);
3117 
3118 	poll_threads();
3119 
3120 	spdk_put_io_channel(ch2);
3121 
3122 	set_thread(0);
3123 
3124 	spdk_put_io_channel(ch1);
3125 
3126 	poll_threads();
3127 
3128 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3129 	CU_ASSERT(rc == 0);
3130 
3131 	poll_threads();
3132 	spdk_delay_us(1000);
3133 	poll_threads();
3134 
3135 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3136 }
3137 
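/* Verify multipath nvme_bdev_ctrlr creation: attaching a second controller with a
 * duplicated cntlid fails, attaching one with a unique cntlid succeeds, and the
 * controllers can be deleted either all at once or one path at a time.
 */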
3138 static void
3139 test_create_bdev_ctrlr(void)
3140 {
3141 	struct nvme_path_id path1 = {}, path2 = {};
3142 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3143 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3144 	const int STRING_SIZE = 32;
3145 	const char *attached_names[STRING_SIZE];
3146 	int rc;
3147 
3148 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3149 	ut_init_trid(&path1.trid);
3150 	ut_init_trid2(&path2.trid);
3151 
3152 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3153 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3154 
3155 	g_ut_attach_ctrlr_status = 0;
3156 	g_ut_attach_bdev_count = 0;
3157 
3158 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3159 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3160 	CU_ASSERT(rc == 0);
3161 
3162 	spdk_delay_us(1000);
3163 	poll_threads();
3164 
3165 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3166 	poll_threads();
3167 
3168 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3169 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3170 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3171 
3172 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3173 	g_ut_attach_ctrlr_status = -EINVAL;
3174 
3175 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3176 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3177 
3178 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3179 
3180 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3181 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3182 	CU_ASSERT(rc == 0);
3183 
3184 	spdk_delay_us(1000);
3185 	poll_threads();
3186 
3187 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3188 	poll_threads();
3189 
3190 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3191 
3192 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3193 	g_ut_attach_ctrlr_status = 0;
3194 
3195 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3196 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3197 
3198 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3199 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3200 	CU_ASSERT(rc == 0);
3201 
3202 	spdk_delay_us(1000);
3203 	poll_threads();
3204 
3205 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3206 	poll_threads();
3207 
3208 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3209 
3210 	/* Delete two ctrlrs at once. */
3211 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3212 	CU_ASSERT(rc == 0);
3213 
3214 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3215 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3216 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3217 
3218 	poll_threads();
3219 	spdk_delay_us(1000);
3220 	poll_threads();
3221 
3222 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3223 
3224 	/* Add two ctrlrs and delete one by one. */
3225 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3226 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3227 
3228 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3229 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3230 
3231 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3232 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3233 	CU_ASSERT(rc == 0);
3234 
3235 	spdk_delay_us(1000);
3236 	poll_threads();
3237 
3238 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3239 	poll_threads();
3240 
3241 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3242 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3243 	CU_ASSERT(rc == 0);
3244 
3245 	spdk_delay_us(1000);
3246 	poll_threads();
3247 
3248 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3249 	poll_threads();
3250 
3251 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3252 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3253 
3254 	rc = bdev_nvme_delete("nvme0", &path1);
3255 	CU_ASSERT(rc == 0);
3256 
3257 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3258 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3259 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3260 
3261 	poll_threads();
3262 	spdk_delay_us(1000);
3263 	poll_threads();
3264 
3265 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3266 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3267 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3268 
3269 	rc = bdev_nvme_delete("nvme0", &path2);
3270 	CU_ASSERT(rc == 0);
3271 
3272 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3273 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3274 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3275 
3276 	poll_threads();
3277 	spdk_delay_us(1000);
3278 	poll_threads();
3279 
3280 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3281 }
3282 
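/* Return the nvme_ns of the given nvme_bdev that belongs to the given nvme_ctrlr,
 * or NULL if the ctrlr does not provide a namespace for this bdev.
 */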
3283 static struct nvme_ns *
3284 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3285 {
3286 	struct nvme_ns *nvme_ns;
3287 
3288 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3289 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3290 			return nvme_ns;
3291 		}
3292 	}
3293 
3294 	return NULL;
3295 }
3296 
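/* Verify multipath namespace aggregation: namespaces with the same UUID on two
 * controllers are joined into a single nvme_bdev, namespaces with the same NSID but
 * different UUIDs are not, and a shared nvme_bdev can be torn down either by deleting
 * the paths one by one or by destructing it directly during bdev subsystem shutdown.
 */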
3297 static void
3298 test_add_multi_ns_to_bdev(void)
3299 {
3300 	struct nvme_path_id path1 = {}, path2 = {};
3301 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3302 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3303 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3304 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3305 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3306 	const int STRING_SIZE = 32;
3307 	const char *attached_names[STRING_SIZE];
3308 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3309 	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3310 	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3311 	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3312 	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3313 	int rc;
3314 
3315 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3316 	ut_init_trid(&path1.trid);
3317 	ut_init_trid2(&path2.trid);
3318 
3319 	/* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */
3320 
3321 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st, 3rd, and 4th
3322 	 * namespaces are populated.
3323 	 */
3324 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3325 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3326 
3327 	ctrlr1->ns[1].is_active = false;
3328 	ctrlr1->ns[4].is_active = false;
3329 	ctrlr1->ns[0].uuid = &uuid1;
3330 	ctrlr1->ns[2].uuid = &uuid3;
3331 	ctrlr1->ns[3].uuid = &uuid4;
3332 
3333 	g_ut_attach_ctrlr_status = 0;
3334 	g_ut_attach_bdev_count = 3;
3335 
3336 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32,
3337 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3338 	CU_ASSERT(rc == 0);
3339 
3340 	spdk_delay_us(1000);
3341 	poll_threads();
3342 
3343 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3344 	poll_threads();
3345 
3346 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st, 2nd, and 4th
3347 	 * namespaces are populated. The uuid of the 4th namespace is different, and hence
3348 	 * adding the 4th namespace to a bdev should fail.
3349 	 */
3350 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3351 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3352 
3353 	ctrlr2->ns[2].is_active = false;
3354 	ctrlr2->ns[4].is_active = false;
3355 	ctrlr2->ns[0].uuid = &uuid1;
3356 	ctrlr2->ns[1].uuid = &uuid2;
3357 	ctrlr2->ns[3].uuid = &uuid44;
3358 
3359 	g_ut_attach_ctrlr_status = 0;
3360 	g_ut_attach_bdev_count = 2;
3361 
3362 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32,
3363 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3364 	CU_ASSERT(rc == 0);
3365 
3366 	spdk_delay_us(1000);
3367 	poll_threads();
3368 
3369 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3370 	poll_threads();
3371 
3372 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3373 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3374 
3375 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3376 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3377 
3378 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3379 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3380 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3381 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3382 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3383 
3384 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3385 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3386 
3387 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3388 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3389 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3390 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3391 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3392 
3393 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3394 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3395 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3396 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3397 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3398 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3399 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3400 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3401 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3402 
3403 	CU_ASSERT(bdev1->ref == 2);
3404 	CU_ASSERT(bdev2->ref == 1);
3405 	CU_ASSERT(bdev3->ref == 1);
3406 	CU_ASSERT(bdev4->ref == 1);
3407 
3408 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3409 	rc = bdev_nvme_delete("nvme0", &path1);
3410 	CU_ASSERT(rc == 0);
3411 
3412 	poll_threads();
3413 	spdk_delay_us(1000);
3414 	poll_threads();
3415 
3416 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3417 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3418 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3419 
3420 	rc = bdev_nvme_delete("nvme0", &path2);
3421 	CU_ASSERT(rc == 0);
3422 
3423 	poll_threads();
3424 	spdk_delay_us(1000);
3425 	poll_threads();
3426 
3427 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3428 
3429 	/* Test if an nvme_bdev which has a namespace shared between two ctrlrs
3430 	 * can be deleted when the bdev subsystem shuts down.
3431 	 */
3432 	g_ut_attach_bdev_count = 1;
3433 
3434 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3435 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3436 
3437 	ctrlr1->ns[0].uuid = &uuid1;
3438 
3439 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32,
3440 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3441 	CU_ASSERT(rc == 0);
3442 
3443 	spdk_delay_us(1000);
3444 	poll_threads();
3445 
3446 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3447 	poll_threads();
3448 
3449 	ut_init_trid2(&path2.trid);
3450 
3451 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3452 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3453 
3454 	ctrlr2->ns[0].uuid = &uuid1;
3455 
3456 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32,
3457 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3458 	CU_ASSERT(rc == 0);
3459 
3460 	spdk_delay_us(1000);
3461 	poll_threads();
3462 
3463 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3464 	poll_threads();
3465 
3466 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3467 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3468 
3469 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3470 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3471 
3472 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3473 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3474 
3475 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3476 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3477 
3478 	/* Check if a nvme_bdev has two nvme_ns. */
3479 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3480 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3481 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3482 
3483 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3484 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3485 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3486 
3487 	/* Delete the nvme_bdev first, as happens when the bdev subsystem shuts down. */
3488 	bdev_nvme_destruct(&bdev1->disk);
3489 
3490 	poll_threads();
3491 
3492 	CU_ASSERT(nvme_ns1->bdev == NULL);
3493 	CU_ASSERT(nvme_ns2->bdev == NULL);
3494 
3495 	nvme_ctrlr1->destruct = true;
3496 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3497 
3498 	poll_threads();
3499 	spdk_delay_us(1000);
3500 	poll_threads();
3501 
3502 	nvme_ctrlr2->destruct = true;
3503 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3504 
3505 	poll_threads();
3506 	spdk_delay_us(1000);
3507 	poll_threads();
3508 
3509 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3510 }
3511 
3512 static void
3513 test_add_multi_io_paths_to_nbdev_ch(void)
3514 {
3515 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3516 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3517 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3518 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3519 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3520 	const int STRING_SIZE = 32;
3521 	const char *attached_names[STRING_SIZE];
3522 	struct nvme_bdev *bdev;
3523 	struct spdk_io_channel *ch;
3524 	struct nvme_bdev_channel *nbdev_ch;
3525 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3526 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3527 	int rc;
3528 
3529 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3530 	ut_init_trid(&path1.trid);
3531 	ut_init_trid2(&path2.trid);
3532 	ut_init_trid3(&path3.trid);
3533 	g_ut_attach_ctrlr_status = 0;
3534 	g_ut_attach_bdev_count = 1;
3535 
3536 	set_thread(1);
3537 
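	/* Attach ctrlr1 and ctrlr2 which expose the same namespace, so that the
	 * created nvme_bdev starts with two I/O paths.
	 */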
3538 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3539 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3540 
3541 	ctrlr1->ns[0].uuid = &uuid1;
3542 
3543 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3544 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3545 	CU_ASSERT(rc == 0);
3546 
3547 	spdk_delay_us(1000);
3548 	poll_threads();
3549 
3550 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3551 	poll_threads();
3552 
3553 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3554 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3555 
3556 	ctrlr2->ns[0].uuid = &uuid1;
3557 
3558 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3559 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3560 	CU_ASSERT(rc == 0);
3561 
3562 	spdk_delay_us(1000);
3563 	poll_threads();
3564 
3565 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3566 	poll_threads();
3567 
3568 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3569 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3570 
3571 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3572 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3573 
3574 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3575 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3576 
3577 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3578 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3579 
3580 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3581 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3582 
3583 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3584 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3585 
3586 	set_thread(0);
3587 
3588 	ch = spdk_get_io_channel(bdev);
3589 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3590 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3591 
3592 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3593 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3594 
3595 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3596 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3597 
3598 	set_thread(1);
3599 
3600 	/* Check if I/O path is dynamically added to nvme_bdev_channel. */
3601 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3602 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3603 
3604 	ctrlr3->ns[0].uuid = &uuid1;
3605 
3606 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3607 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3608 	CU_ASSERT(rc == 0);
3609 
3610 	spdk_delay_us(1000);
3611 	poll_threads();
3612 
3613 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3614 	poll_threads();
3615 
3616 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3617 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3618 
3619 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3620 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3621 
3622 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3623 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3624 
3625 	/* Check if I/O path is dynamically deleted from nvme_bdev_channel. */
3626 	rc = bdev_nvme_delete("nvme0", &path2);
3627 	CU_ASSERT(rc == 0);
3628 
3629 	poll_threads();
3630 	spdk_delay_us(1000);
3631 	poll_threads();
3632 
3633 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3634 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3635 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3636 
3637 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3638 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3639 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3640 
3641 	set_thread(0);
3642 
3643 	spdk_put_io_channel(ch);
3644 
3645 	poll_threads();
3646 
3647 	set_thread(1);
3648 
3649 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3650 	CU_ASSERT(rc == 0);
3651 
3652 	poll_threads();
3653 	spdk_delay_us(1000);
3654 	poll_threads();
3655 
3656 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3657 }
3658 
3659 static void
3660 test_admin_path(void)
3661 {
3662 	struct nvme_path_id path1 = {}, path2 = {};
3663 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3664 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3665 	const int STRING_SIZE = 32;
3666 	const char *attached_names[STRING_SIZE];
3667 	struct nvme_bdev *bdev;
3668 	struct spdk_io_channel *ch;
3669 	struct spdk_bdev_io *bdev_io;
3670 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3671 	int rc;
3672 
3673 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3674 	ut_init_trid(&path1.trid);
3675 	ut_init_trid2(&path2.trid);
3676 	g_ut_attach_ctrlr_status = 0;
3677 	g_ut_attach_bdev_count = 1;
3678 
3679 	set_thread(0);
3680 
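	/* Attach ctrlr1 and ctrlr2 which share a namespace. An admin command for
	 * the resulting nvme_bdev may be submitted to either ctrlr.
	 */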
3681 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3682 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3683 
3684 	ctrlr1->ns[0].uuid = &uuid1;
3685 
3686 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3687 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3688 	CU_ASSERT(rc == 0);
3689 
3690 	spdk_delay_us(1000);
3691 	poll_threads();
3692 
3693 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3694 	poll_threads();
3695 
3696 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3697 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3698 
3699 	ctrlr2->ns[0].uuid = &uuid1;
3700 
3701 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3702 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3703 	CU_ASSERT(rc == 0);
3704 
3705 	spdk_delay_us(1000);
3706 	poll_threads();
3707 
3708 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3709 	poll_threads();
3710 
3711 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3712 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3713 
3714 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3715 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3716 
3717 	ch = spdk_get_io_channel(bdev);
3718 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3719 
3720 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3721 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3722 
3723 	/* ctrlr1 is failed but ctrlr2 is not failed. The admin command should be
3724 	 * submitted to ctrlr2.
3725 	 */
3726 	ctrlr1->is_failed = true;
3727 	bdev_io->internal.in_submit_request = true;
3728 
3729 	bdev_nvme_submit_request(ch, bdev_io);
3730 
3731 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3732 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3733 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3734 
3735 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3736 	poll_threads();
3737 
3738 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3739 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3740 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3741 
3742 	/* Both ctrlr1 and ctrlr2 are failed. Submission of the admin command should fail. */
3743 	ctrlr2->is_failed = true;
3744 	bdev_io->internal.in_submit_request = true;
3745 
3746 	bdev_nvme_submit_request(ch, bdev_io);
3747 
3748 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3749 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3750 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3751 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3752 
3753 	free(bdev_io);
3754 
3755 	spdk_put_io_channel(ch);
3756 
3757 	poll_threads();
3758 
3759 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3760 	CU_ASSERT(rc == 0);
3761 
3762 	poll_threads();
3763 	spdk_delay_us(1000);
3764 	poll_threads();
3765 
3766 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3767 }
3768 
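/* Helper to find the nvme_io_path in the given nvme_bdev_channel whose qpair
 * belongs to the given nvme_ctrlr.
 */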
3769 static struct nvme_io_path *
3770 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3771 			struct nvme_ctrlr *nvme_ctrlr)
3772 {
3773 	struct nvme_io_path *io_path;
3774 
3775 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3776 		if (io_path->qpair->ctrlr == nvme_ctrlr) {
3777 			return io_path;
3778 		}
3779 	}
3780 
3781 	return NULL;
3782 }
3783 
3784 static void
3785 test_reset_bdev_ctrlr(void)
3786 {
3787 	struct nvme_path_id path1 = {}, path2 = {};
3788 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3789 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3790 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3791 	struct nvme_path_id *curr_path1, *curr_path2;
3792 	const int STRING_SIZE = 32;
3793 	const char *attached_names[STRING_SIZE];
3794 	struct nvme_bdev *bdev;
3795 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
3796 	struct nvme_bdev_io *first_bio;
3797 	struct spdk_io_channel *ch1, *ch2;
3798 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3799 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
3800 	int rc;
3801 
3802 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3803 	ut_init_trid(&path1.trid);
3804 	ut_init_trid2(&path2.trid);
3805 	g_ut_attach_ctrlr_status = 0;
3806 	g_ut_attach_bdev_count = 1;
3807 
3808 	set_thread(0);
3809 
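	/* Attach ctrlr1 and ctrlr2 to a single nvme_bdev_ctrlr, so that a single
	 * reset bdev_io resets ctrlr1 and then ctrlr2.
	 */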
3810 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3811 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3812 
3813 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3814 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3815 	CU_ASSERT(rc == 0);
3816 
3817 	spdk_delay_us(1000);
3818 	poll_threads();
3819 
3820 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3821 	poll_threads();
3822 
3823 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3824 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3825 
3826 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3827 			      attach_ctrlr_done, NULL, NULL, NULL, true);
3828 	CU_ASSERT(rc == 0);
3829 
3830 	spdk_delay_us(1000);
3831 	poll_threads();
3832 
3833 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3834 	poll_threads();
3835 
3836 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3837 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3838 
3839 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3840 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3841 
3842 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
3843 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
3844 
3845 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3846 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3847 
3848 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
3849 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
3850 
3851 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3852 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3853 
3854 	set_thread(0);
3855 
3856 	ch1 = spdk_get_io_channel(bdev);
3857 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3858 
3859 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3860 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
3861 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
3862 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
3863 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
3864 
3865 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
3866 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
3867 
3868 	set_thread(1);
3869 
3870 	ch2 = spdk_get_io_channel(bdev);
3871 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3872 
3873 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3874 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
3875 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
3876 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
3877 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
3878 
3879 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
3880 
3881 	/* The first reset request from bdev_io is submitted on thread 0.
3882 	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
3883 	 *
3884 	 * A few extra polls are necessary after resetting ctrlr1 to check
3885 	 * pending reset requests for ctrlr1.
3886 	 */
3887 	ctrlr1->is_failed = true;
3888 	curr_path1->is_failed = true;
3889 	ctrlr2->is_failed = true;
3890 	curr_path2->is_failed = true;
3891 
3892 	set_thread(0);
3893 
3894 	bdev_nvme_submit_request(ch1, first_bdev_io);
3895 	CU_ASSERT(first_bio->io_path == io_path11);
3896 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3897 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
3898 
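	/* The qpair for ctrlr1 on thread 0 is deleted first, and then the qpair
	 * for ctrlr1 on thread 1 is deleted.
	 */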
3899 	poll_thread_times(0, 3);
3900 	CU_ASSERT(io_path11->qpair->qpair == NULL);
3901 	CU_ASSERT(io_path21->qpair->qpair != NULL);
3902 
3903 	poll_thread_times(1, 2);
3904 	CU_ASSERT(io_path11->qpair->qpair == NULL);
3905 	CU_ASSERT(io_path21->qpair->qpair == NULL);
3906 	CU_ASSERT(ctrlr1->is_failed == true);
3907 
3908 	poll_thread_times(0, 1);
3909 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3910 	CU_ASSERT(ctrlr1->is_failed == false);
3911 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
3912 	CU_ASSERT(curr_path1->is_failed == true);
3913 
3914 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3915 	poll_thread_times(0, 2);
3916 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
3917 
3918 	poll_thread_times(0, 1);
3919 	CU_ASSERT(io_path11->qpair->qpair != NULL);
3920 	CU_ASSERT(io_path21->qpair->qpair == NULL);
3921 
3922 	poll_thread_times(1, 1);
3923 	CU_ASSERT(io_path11->qpair->qpair != NULL);
3924 	CU_ASSERT(io_path21->qpair->qpair != NULL);
3925 
3926 	poll_thread_times(0, 2);
3927 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3928 	poll_thread_times(1, 1);
3929 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3930 	poll_thread_times(0, 2);
3931 	CU_ASSERT(nvme_ctrlr1->resetting == false);
3932 	CU_ASSERT(curr_path1->is_failed == false);
3933 	CU_ASSERT(first_bio->io_path == io_path12);
3934 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3935 
3936 	poll_thread_times(0, 3);
3937 	CU_ASSERT(io_path12->qpair->qpair == NULL);
3938 	CU_ASSERT(io_path22->qpair->qpair != NULL);
3939 
3940 	poll_thread_times(1, 2);
3941 	CU_ASSERT(io_path12->qpair->qpair == NULL);
3942 	CU_ASSERT(io_path22->qpair->qpair == NULL);
3943 	CU_ASSERT(ctrlr2->is_failed == true);
3944 
3945 	poll_thread_times(0, 1);
3946 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3947 	CU_ASSERT(ctrlr2->is_failed == false);
3948 	CU_ASSERT(ctrlr2->adminq.is_connected == false);
3949 	CU_ASSERT(curr_path2->is_failed == true);
3950 
3951 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3952 	poll_thread_times(0, 2);
3953 	CU_ASSERT(ctrlr2->adminq.is_connected == true);
3954 
3955 	poll_thread_times(0, 1);
3956 	CU_ASSERT(io_path12->qpair->qpair != NULL);
3957 	CU_ASSERT(io_path22->qpair->qpair == NULL);
3958 
3959 	poll_thread_times(1, 2);
3960 	CU_ASSERT(io_path12->qpair->qpair != NULL);
3961 	CU_ASSERT(io_path22->qpair->qpair != NULL);
3962 
3963 	poll_thread_times(0, 2);
3964 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3965 	poll_thread_times(1, 1);
3966 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3967 	poll_thread_times(0, 2);
3968 	CU_ASSERT(first_bio->io_path == NULL);
3969 	CU_ASSERT(nvme_ctrlr2->resetting == false);
3970 	CU_ASSERT(curr_path2->is_failed == false);
3971 
3972 	poll_threads();
3973 
3974 	/* There is a race between two reset requests from bdev_io.
3975 	 *
3976 	 * The first reset request is submitted on thread 0, and the second reset
3977 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
3978 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
3979 	 * both reset requests go to ctrlr2. The first comes earlier than the second.
3980 	 * The second is pending on ctrlr2 again. After the first completes resetting
3981 	 * ctrlr2, both complete successfully.
3982 	 */
3983 	ctrlr1->is_failed = true;
3984 	curr_path1->is_failed = true;
3985 	ctrlr2->is_failed = true;
3986 	curr_path2->is_failed = true;
3987 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3988 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3989 
3990 	set_thread(0);
3991 
3992 	bdev_nvme_submit_request(ch1, first_bdev_io);
3993 
3994 	set_thread(1);
3995 
3996 	bdev_nvme_submit_request(ch2, second_bdev_io);
3997 
3998 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3999 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
4000 	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);
4001 
4002 	poll_threads();
4003 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4004 	poll_threads();
4005 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4006 	poll_threads();
4007 
4008 	CU_ASSERT(ctrlr1->is_failed == false);
4009 	CU_ASSERT(curr_path1->is_failed == false);
4010 	CU_ASSERT(ctrlr2->is_failed == false);
4011 	CU_ASSERT(curr_path2->is_failed == false);
4012 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4013 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4014 
4015 	set_thread(0);
4016 
4017 	spdk_put_io_channel(ch1);
4018 
4019 	set_thread(1);
4020 
4021 	spdk_put_io_channel(ch2);
4022 
4023 	poll_threads();
4024 
4025 	set_thread(0);
4026 
4027 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4028 	CU_ASSERT(rc == 0);
4029 
4030 	poll_threads();
4031 	spdk_delay_us(1000);
4032 	poll_threads();
4033 
4034 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4035 
4036 	free(first_bdev_io);
4037 	free(second_bdev_io);
4038 }
4039 
4040 static void
4041 test_find_io_path(void)
4042 {
4043 	struct nvme_bdev_channel nbdev_ch = {
4044 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4045 	};
4046 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4047 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4048 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4049 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4050 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4051 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4052 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
4053 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4054 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4055 
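	/* Start with a single I/O path in the channel. */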
4056 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4057 
4058 	/* Test if io_path whose ANA state is not accessible is excluded. */
4059 
4060 	nvme_qpair1.qpair = &qpair1;
4061 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4062 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4063 
4064 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4065 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4066 
4067 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4068 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4069 
4070 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4071 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4072 
4073 	nbdev_ch.current_io_path = NULL;
4074 
4075 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4076 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4077 
4078 	nbdev_ch.current_io_path = NULL;
4079 
4080 	/* Test if io_path whose qpair is resetting is excluded. */
4081 
4082 	nvme_qpair1.qpair = NULL;
4083 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4084 
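	/* Add a second I/O path to exercise multipath selection. */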
4085 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4086 
4087 	/* Test that an io_path in ANA optimized state is preferred, and that
4088 	 * otherwise the first found io_path in ANA non-optimized state is used.
4089 	 */
4090 
4091 	nvme_qpair1.qpair = &qpair1;
4092 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4093 	nvme_qpair2.qpair = &qpair2;
4094 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4095 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4096 
4097 	nbdev_ch.current_io_path = NULL;
4098 
4099 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4100 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4101 
4102 	nbdev_ch.current_io_path = NULL;
4103 }
4104 
4105 static void
4106 test_retry_io_if_ana_state_is_updating(void)
4107 {
4108 	struct nvme_path_id path = {};
4109 	struct nvme_ctrlr_opts opts = {};
4110 	struct spdk_nvme_ctrlr *ctrlr;
4111 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4112 	struct nvme_ctrlr *nvme_ctrlr;
4113 	const int STRING_SIZE = 32;
4114 	const char *attached_names[STRING_SIZE];
4115 	struct nvme_bdev *bdev;
4116 	struct nvme_ns *nvme_ns;
4117 	struct spdk_bdev_io *bdev_io1;
4118 	struct spdk_io_channel *ch;
4119 	struct nvme_bdev_channel *nbdev_ch;
4120 	struct nvme_io_path *io_path;
4121 	struct nvme_qpair *nvme_qpair;
4122 	int rc;
4123 
4124 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4125 	ut_init_trid(&path.trid);
4126 
4127 	set_thread(0);
4128 
4129 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4130 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4131 
4132 	g_ut_attach_ctrlr_status = 0;
4133 	g_ut_attach_bdev_count = 1;
4134 
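	/* ctrlr_loss_timeout_sec of -1 means the ctrlr is not deleted and reconnect
	 * is retried indefinitely, with reconnect_delay_sec seconds between attempts.
	 */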
4135 	opts.ctrlr_loss_timeout_sec = -1;
4136 	opts.reconnect_delay_sec = 1;
4137 
4138 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4139 			      attach_ctrlr_done, NULL, NULL, &opts, false);
4140 	CU_ASSERT(rc == 0);
4141 
4142 	spdk_delay_us(1000);
4143 	poll_threads();
4144 
4145 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4146 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4147 
4148 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4149 	CU_ASSERT(nvme_ctrlr != NULL);
4150 
4151 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4152 	CU_ASSERT(bdev != NULL);
4153 
4154 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4155 	CU_ASSERT(nvme_ns != NULL);
4156 
4157 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4158 	ut_bdev_io_set_buf(bdev_io1);
4159 
4160 	ch = spdk_get_io_channel(bdev);
4161 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4162 
4163 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4164 
4165 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4166 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4167 
4168 	nvme_qpair = io_path->qpair;
4169 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4170 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4171 
4172 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4173 
4174 	/* If qpair is connected, I/O should succeed. */
4175 	bdev_io1->internal.in_submit_request = true;
4176 
4177 	bdev_nvme_submit_request(ch, bdev_io1);
4178 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4179 
4180 	poll_threads();
4181 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4182 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4183 
4184 	/* If ANA state of namespace is inaccessible, I/O should be queued. */
4185 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4186 	nbdev_ch->current_io_path = NULL;
4187 
4188 	bdev_io1->internal.in_submit_request = true;
4189 
4190 	bdev_nvme_submit_request(ch, bdev_io1);
4191 
4192 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4193 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4194 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4195 
4196 	/* ANA state became accessible while I/O was queued. */
4197 	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4198 
4199 	spdk_delay_us(1000000);
4200 
4201 	poll_thread_times(0, 1);
4202 
4203 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4204 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4205 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4206 
4207 	poll_threads();
4208 
4209 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4210 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4211 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4212 
4213 	free(bdev_io1);
4214 
4215 	spdk_put_io_channel(ch);
4216 
4217 	poll_threads();
4218 
4219 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4220 	CU_ASSERT(rc == 0);
4221 
4222 	poll_threads();
4223 	spdk_delay_us(1000);
4224 	poll_threads();
4225 
4226 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4227 }
4228 
4229 static void
4230 test_retry_io_for_io_path_error(void)
4231 {
4232 	struct nvme_path_id path1 = {}, path2 = {};
4233 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4234 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4235 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4236 	const int STRING_SIZE = 32;
4237 	const char *attached_names[STRING_SIZE];
4238 	struct nvme_bdev *bdev;
4239 	struct nvme_ns *nvme_ns1, *nvme_ns2;
4240 	struct spdk_bdev_io *bdev_io;
4241 	struct nvme_bdev_io *bio;
4242 	struct spdk_io_channel *ch;
4243 	struct nvme_bdev_channel *nbdev_ch;
4244 	struct nvme_io_path *io_path1, *io_path2;
4245 	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4246 	struct ut_nvme_req *req;
4247 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4248 	int rc;
4249 
4250 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4251 	ut_init_trid(&path1.trid);
4252 	ut_init_trid2(&path2.trid);
4253 
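	/* Allow each failed I/O to be retried once. */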
4254 	g_opts.bdev_retry_count = 1;
4255 
4256 	set_thread(0);
4257 
4258 	g_ut_attach_ctrlr_status = 0;
4259 	g_ut_attach_bdev_count = 1;
4260 
4261 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4262 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4263 
4264 	ctrlr1->ns[0].uuid = &uuid1;
4265 
4266 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4267 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4268 	CU_ASSERT(rc == 0);
4269 
4270 	spdk_delay_us(1000);
4271 	poll_threads();
4272 
4273 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4274 	poll_threads();
4275 
4276 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4277 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4278 
4279 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4280 	CU_ASSERT(nvme_ctrlr1 != NULL);
4281 
4282 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4283 	CU_ASSERT(bdev != NULL);
4284 
4285 	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4286 	CU_ASSERT(nvme_ns1 != NULL);
4287 	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4288 
4289 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4290 	ut_bdev_io_set_buf(bdev_io);
4291 
4292 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4293 
4294 	ch = spdk_get_io_channel(bdev);
4295 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4296 
4297 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4298 
4299 	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4300 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4301 
4302 	nvme_qpair1 = io_path1->qpair;
4303 	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4304 	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4305 
4306 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4307 
4308 	/* I/O got a temporary I/O path error, but it should not be retried if DNR is set. */
4309 	bdev_io->internal.in_submit_request = true;
4310 
4311 	bdev_nvme_submit_request(ch, bdev_io);
4312 
4313 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4314 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4315 
4316 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4317 	SPDK_CU_ASSERT_FATAL(req != NULL);
4318 
4319 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4320 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4321 	req->cpl.status.dnr = 1;
4322 
4323 	poll_thread_times(0, 1);
4324 
4325 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4326 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4327 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4328 
4329 	/* I/O got a temporary I/O path error, but it should succeed after retry. */
4330 	bdev_io->internal.in_submit_request = true;
4331 
4332 	bdev_nvme_submit_request(ch, bdev_io);
4333 
4334 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4335 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4336 
4337 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4338 	SPDK_CU_ASSERT_FATAL(req != NULL);
4339 
4340 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4341 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4342 
4343 	poll_thread_times(0, 1);
4344 
4345 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4346 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4347 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4348 
4349 	poll_threads();
4350 
4351 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4352 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4353 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4354 
4355 	/* Add io_path2 dynamically, and create a multipath configuration. */
4356 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4357 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4358 
4359 	ctrlr2->ns[0].uuid = &uuid1;
4360 
4361 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4362 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4363 	CU_ASSERT(rc == 0);
4364 
4365 	spdk_delay_us(1000);
4366 	poll_threads();
4367 
4368 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4369 	poll_threads();
4370 
4371 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4372 	CU_ASSERT(nvme_ctrlr2 != NULL);
4373 
4374 	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4375 	CU_ASSERT(nvme_ns2 != NULL);
4376 	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4377 
4378 	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4379 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4380 
4381 	nvme_qpair2 = io_path2->qpair;
4382 	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4383 	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4384 
4385 	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4386 	 * and deleted. Hence the I/O is aborted. However, io_path2 is available,
4387 	 * so after a retry the I/O is submitted to io_path2 and should succeed.
4388 	 */
4389 	bdev_io->internal.in_submit_request = true;
4390 
4391 	bdev_nvme_submit_request(ch, bdev_io);
4392 
4393 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4394 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4395 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4396 
4397 	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4398 	SPDK_CU_ASSERT_FATAL(req != NULL);
4399 
4400 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4401 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4402 
4403 	poll_thread_times(0, 1);
4404 
4405 	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4406 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4407 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4408 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4409 
4410 	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4411 	nvme_qpair1->qpair = NULL;
4412 
4413 	poll_threads();
4414 
4415 	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4416 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4417 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4418 
4419 	free(bdev_io);
4420 
4421 	spdk_put_io_channel(ch);
4422 
4423 	poll_threads();
4424 
4425 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4426 	CU_ASSERT(rc == 0);
4427 
4428 	poll_threads();
4429 	spdk_delay_us(1000);
4430 	poll_threads();
4431 
4432 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4433 
4434 	g_opts.bdev_retry_count = 0;
4435 }
4436 
4437 static void
4438 test_retry_io_count(void)
4439 {
4440 	struct nvme_path_id path = {};
4441 	struct spdk_nvme_ctrlr *ctrlr;
4442 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4443 	struct nvme_ctrlr *nvme_ctrlr;
4444 	const int STRING_SIZE = 32;
4445 	const char *attached_names[STRING_SIZE];
4446 	struct nvme_bdev *bdev;
4447 	struct nvme_ns *nvme_ns;
4448 	struct spdk_bdev_io *bdev_io;
4449 	struct nvme_bdev_io *bio;
4450 	struct spdk_io_channel *ch;
4451 	struct nvme_bdev_channel *nbdev_ch;
4452 	struct nvme_io_path *io_path;
4453 	struct nvme_qpair *nvme_qpair;
4454 	struct ut_nvme_req *req;
4455 	int rc;
4456 
4457 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4458 	ut_init_trid(&path.trid);
4459 
4460 	set_thread(0);
4461 
4462 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4463 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4464 
4465 	g_ut_attach_ctrlr_status = 0;
4466 	g_ut_attach_bdev_count = 1;
4467 
4468 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4469 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4470 	CU_ASSERT(rc == 0);
4471 
4472 	spdk_delay_us(1000);
4473 	poll_threads();
4474 
4475 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4476 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4477 
4478 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4479 	CU_ASSERT(nvme_ctrlr != NULL);
4480 
4481 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4482 	CU_ASSERT(bdev != NULL);
4483 
4484 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4485 	CU_ASSERT(nvme_ns != NULL);
4486 
4487 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4488 	ut_bdev_io_set_buf(bdev_io);
4489 
4490 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4491 
4492 	ch = spdk_get_io_channel(bdev);
4493 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4494 
4495 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4496 
4497 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4498 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4499 
4500 	nvme_qpair = io_path->qpair;
4501 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4502 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4503 
4504 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4505 
4506 	/* If I/O is aborted by request, it should not be retried. */
4507 	g_opts.bdev_retry_count = 1;
4508 
4509 	bdev_io->internal.in_submit_request = true;
4510 
4511 	bdev_nvme_submit_request(ch, bdev_io);
4512 
4513 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4514 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4515 
4516 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4517 	SPDK_CU_ASSERT_FATAL(req != NULL);
4518 
4519 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4520 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4521 
4522 	poll_thread_times(0, 1);
4523 
4524 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4525 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4526 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4527 
4528 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4529 	 * the failed I/O should not be retried.
4530 	 */
4531 	g_opts.bdev_retry_count = 4;
4532 
4533 	bdev_io->internal.in_submit_request = true;
4534 
4535 	bdev_nvme_submit_request(ch, bdev_io);
4536 
4537 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4538 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4539 
4540 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4541 	SPDK_CU_ASSERT_FATAL(req != NULL);
4542 
4543 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4544 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4545 	bio->retry_count = 4;
4546 
4547 	poll_thread_times(0, 1);
4548 
4549 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4550 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4551 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4552 
4553 	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
4554 	g_opts.bdev_retry_count = -1;
4555 
4556 	bdev_io->internal.in_submit_request = true;
4557 
4558 	bdev_nvme_submit_request(ch, bdev_io);
4559 
4560 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4561 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4562 
4563 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4564 	SPDK_CU_ASSERT_FATAL(req != NULL);
4565 
4566 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4567 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4568 	bio->retry_count = 4;
4569 
4570 	poll_thread_times(0, 1);
4571 
4572 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4573 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4574 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4575 
4576 	poll_threads();
4577 
4578 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4579 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4580 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4581 
4582 	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4583 	 * the failed I/O should be retried.
4584 	 */
4585 	g_opts.bdev_retry_count = 4;
4586 
4587 	bdev_io->internal.in_submit_request = true;
4588 
4589 	bdev_nvme_submit_request(ch, bdev_io);
4590 
4591 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4592 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4593 
4594 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4595 	SPDK_CU_ASSERT_FATAL(req != NULL);
4596 
4597 	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4598 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4599 	bio->retry_count = 3;
4600 
4601 	poll_thread_times(0, 1);
4602 
4603 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4604 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4605 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4606 
4607 	poll_threads();
4608 
4609 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4610 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4611 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4612 
4613 	free(bdev_io);
4614 
4615 	spdk_put_io_channel(ch);
4616 
4617 	poll_threads();
4618 
4619 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4620 	CU_ASSERT(rc == 0);
4621 
4622 	poll_threads();
4623 	spdk_delay_us(1000);
4624 	poll_threads();
4625 
4626 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4627 
4628 	g_opts.bdev_retry_count = 0;
4629 }
4630 
4631 static void
4632 test_concurrent_read_ana_log_page(void)
4633 {
4634 	struct spdk_nvme_transport_id trid = {};
4635 	struct spdk_nvme_ctrlr *ctrlr;
4636 	struct nvme_ctrlr *nvme_ctrlr;
4637 	const int STRING_SIZE = 32;
4638 	const char *attached_names[STRING_SIZE];
4639 	int rc;
4640 
4641 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4642 	ut_init_trid(&trid);
4643 
4644 	set_thread(0);
4645 
4646 	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4647 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4648 
4649 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4650 
4651 	g_ut_attach_ctrlr_status = 0;
4652 	g_ut_attach_bdev_count = 1;
4653 
4654 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
4655 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4656 	CU_ASSERT(rc == 0);
4657 
4658 	spdk_delay_us(1000);
4659 	poll_threads();
4660 
4661 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4662 	poll_threads();
4663 
4664 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4665 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4666 
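	/* The first read request should be accepted and should issue a request
	 * to the admin queue.
	 */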
4667 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4668 
4669 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4670 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4671 
4672 	/* Following read request should be rejected. */
4673 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4674 
4675 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4676 
4677 	set_thread(1);
4678 
4679 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4680 
4681 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4682 
4683 	/* Reset request while reading ANA log page should not be rejected. */
4684 	rc = bdev_nvme_reset(nvme_ctrlr);
4685 	CU_ASSERT(rc == 0);
4686 
4687 	poll_threads();
4688 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4689 	poll_threads();
4690 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4691 	poll_threads();
4692 
4693 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4694 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4695 
4696 	/* Read ANA log page while resetting ctrlr should be rejected. */
4697 	rc = bdev_nvme_reset(nvme_ctrlr);
4698 	CU_ASSERT(rc == 0);
4699 
4700 	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4701 
4702 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4703 
4704 	poll_threads();
4705 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4706 	poll_threads();
4707 
4708 	set_thread(0);
4709 
4710 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4711 	CU_ASSERT(rc == 0);
4712 
4713 	poll_threads();
4714 	spdk_delay_us(1000);
4715 	poll_threads();
4716 
4717 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4718 }
4719 
4720 static void
4721 test_retry_io_for_ana_error(void)
4722 {
4723 	struct nvme_path_id path = {};
4724 	struct spdk_nvme_ctrlr *ctrlr;
4725 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4726 	struct nvme_ctrlr *nvme_ctrlr;
4727 	const int STRING_SIZE = 32;
4728 	const char *attached_names[STRING_SIZE];
4729 	struct nvme_bdev *bdev;
4730 	struct nvme_ns *nvme_ns;
4731 	struct spdk_bdev_io *bdev_io;
4732 	struct nvme_bdev_io *bio;
4733 	struct spdk_io_channel *ch;
4734 	struct nvme_bdev_channel *nbdev_ch;
4735 	struct nvme_io_path *io_path;
4736 	struct nvme_qpair *nvme_qpair;
4737 	struct ut_nvme_req *req;
4738 	uint64_t now;
4739 	int rc;
4740 
4741 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4742 	ut_init_trid(&path.trid);
4743 
4744 	g_opts.bdev_retry_count = 1;
4745 
4746 	set_thread(0);
4747 
4748 	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
4749 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4750 
4751 	g_ut_attach_ctrlr_status = 0;
4752 	g_ut_attach_bdev_count = 1;
4753 
4754 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4755 			      attach_ctrlr_done, NULL, NULL, NULL, false);
4756 	CU_ASSERT(rc == 0);
4757 
4758 	spdk_delay_us(1000);
4759 	poll_threads();
4760 
4761 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4762 	poll_threads();
4763 
4764 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4765 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4766 
4767 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4768 	CU_ASSERT(nvme_ctrlr != NULL);
4769 
4770 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4771 	CU_ASSERT(bdev != NULL);
4772 
4773 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4774 	CU_ASSERT(nvme_ns != NULL);
4775 
4776 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4777 	ut_bdev_io_set_buf(bdev_io);
4778 
4779 	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4780 
4781 	ch = spdk_get_io_channel(bdev);
4782 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4783 
4784 	nbdev_ch = spdk_io_channel_get_ctx(ch);
4785 
4786 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4787 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4788 
4789 	nvme_qpair = io_path->qpair;
4790 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4791 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4792 
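	/* Record the current tick count to check the retry timing below. */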
4793 	now = spdk_get_ticks();
4794 
4795 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4796 
4797 	/* If I/O got an ANA error, it should be queued, the corresponding namespace
4798 	 * should be frozen, and its ANA state should be updated.
4799 	 */
4800 	bdev_io->internal.in_submit_request = true;
4801 
4802 	bdev_nvme_submit_request(ch, bdev_io);
4803 
4804 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4805 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4806 
4807 	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4808 	SPDK_CU_ASSERT_FATAL(req != NULL);
4809 
4810 	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4811 	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
4812 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4813 
4814 	poll_thread_times(0, 1);
4815 
4816 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4817 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4818 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4819 	/* I/O should be retried immediately. */
4820 	CU_ASSERT(bio->retry_ticks == now);
4821 	CU_ASSERT(nvme_ns->ana_state_updating == true);
4822 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4823 
4824 	poll_threads();
4825 
4826 	/* Namespace is inaccessible, and hence I/O should be queued again. */
4827 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4828 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4829 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4830 	/* If no I/O path was found but one may become available, the I/O should
4831 	 * be retried after one second.
4832 	 */
4833 	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
4834 
4835 	/* Namespace should be unfrozen after its ANA state update completes. */
4836 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4837 	poll_threads();
4838 
4839 	CU_ASSERT(nvme_ns->ana_state_updating == false);
4840 	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
4841 	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4842 
4843 	/* Retrying the queued I/O should succeed. */
4844 	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
4845 	poll_threads();
4846 
4847 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4848 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4849 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4850 
4851 	free(bdev_io);
4852 
4853 	spdk_put_io_channel(ch);
4854 
4855 	poll_threads();
4856 
4857 	rc = bdev_nvme_delete("nvme0", &g_any_path);
4858 	CU_ASSERT(rc == 0);
4859 
4860 	poll_threads();
4861 	spdk_delay_us(1000);
4862 	poll_threads();
4863 
4864 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4865 
4866 	g_opts.bdev_retry_count = 0;
4867 }
4868 
4869 static void
4870 test_retry_admin_passthru_for_path_error(void)
4871 {
4872 	struct nvme_path_id path1 = {}, path2 = {};
4873 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4874 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4875 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4876 	const int STRING_SIZE = 32;
4877 	const char *attached_names[STRING_SIZE];
4878 	struct nvme_bdev *bdev;
4879 	struct spdk_bdev_io *admin_io;
4880 	struct spdk_io_channel *ch;
4881 	struct ut_nvme_req *req;
4882 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4883 	int rc;
4884 
4885 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4886 	ut_init_trid(&path1.trid);
4887 	ut_init_trid2(&path2.trid);
4888 
4889 	g_opts.bdev_retry_count = 1;
4890 
4891 	set_thread(0);
4892 
4893 	g_ut_attach_ctrlr_status = 0;
4894 	g_ut_attach_bdev_count = 1;
4895 
4896 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4897 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4898 
4899 	ctrlr1->ns[0].uuid = &uuid1;
4900 
4901 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4902 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4903 	CU_ASSERT(rc == 0);
4904 
4905 	spdk_delay_us(1000);
4906 	poll_threads();
4907 
4908 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4909 	poll_threads();
4910 
4911 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4912 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4913 
4914 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4915 	CU_ASSERT(nvme_ctrlr1 != NULL);
4916 
4917 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4918 	CU_ASSERT(bdev != NULL);
4919 
4920 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
4921 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
4922 
4923 	ch = spdk_get_io_channel(bdev);
4924 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4925 
4926 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
4927 
4928 	/* Admin passthrough got a path error, but it should not be retried if DNR is set. */
4929 	admin_io->internal.in_submit_request = true;
4930 
4931 	bdev_nvme_submit_request(ch, admin_io);
4932 
4933 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4934 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4935 
4936 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
4937 	SPDK_CU_ASSERT_FATAL(req != NULL);
4938 
4939 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4940 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4941 	req->cpl.status.dnr = 1;
4942 
4943 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4944 	poll_thread_times(0, 2);
4945 
4946 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4947 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4948 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4949 
4950 	/* Admin passthrough got a path error, but it should succeed after retry. */
4951 	admin_io->internal.in_submit_request = true;
4952 
4953 	bdev_nvme_submit_request(ch, admin_io);
4954 
4955 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4956 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4957 
4958 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
4959 	SPDK_CU_ASSERT_FATAL(req != NULL);
4960 
4961 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4962 	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4963 
4964 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4965 	poll_thread_times(0, 2);
4966 
4967 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
4968 	CU_ASSERT(admin_io->internal.in_submit_request == true);
4969 
4970 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4971 	poll_threads();
4972 
4973 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
4974 	CU_ASSERT(admin_io->internal.in_submit_request == false);
4975 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4976 
4977 	/* Add ctrlr2 dynamically, and create a multipath configuration. */
4978 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4979 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4980 
4981 	ctrlr2->ns[0].uuid = &uuid1;
4982 
4983 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4984 			      attach_ctrlr_done, NULL, NULL, NULL, true);
4985 	CU_ASSERT(rc == 0);
4986 
4987 	spdk_delay_us(1000);
4988 	poll_threads();
4989 
4990 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4991 	poll_threads();
4992 
4993 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4994 	CU_ASSERT(nvme_ctrlr2 != NULL);
4995 
4996 	/* Admin passthrough was submitted to ctrlr1, but ctrlr1 was failed.
4997 	 * Hence the admin passthrough was aborted. But ctrlr2 is available.
4998 	 * So after a retry, the admin passthrough is submitted to ctrlr2 and
4999 	 * should succeed.
5000 	 */
5001 	admin_io->internal.in_submit_request = true;
5002 
5003 	bdev_nvme_submit_request(ch, admin_io);
5004 
5005 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 1);
5006 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
5007 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5008 
5009 	req = ut_get_outstanding_nvme_request(&ctrlr1->adminq, admin_io->driver_ctx);
5010 	SPDK_CU_ASSERT_FATAL(req != NULL);
5011 
5012 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
5013 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5014 	ctrlr1->is_failed = true;
5015 
5016 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5017 	poll_thread_times(0, 2);
5018 
5019 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
5020 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
5021 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5022 
5023 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5024 	poll_threads();
5025 
5026 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
5027 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5028 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5029 
5030 	free(admin_io);
5031 
5032 	spdk_put_io_channel(ch);
5033 
5034 	poll_threads();
5035 
5036 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5037 	CU_ASSERT(rc == 0);
5038 
5039 	poll_threads();
5040 	spdk_delay_us(1000);
5041 	poll_threads();
5042 
5043 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5044 
5045 	g_opts.bdev_retry_count = 0;
5046 }
5047 
5048 static void
5049 test_retry_admin_passthru_by_count(void)
5050 {
5051 	struct nvme_path_id path = {};
5052 	struct spdk_nvme_ctrlr *ctrlr;
5053 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5054 	struct nvme_ctrlr *nvme_ctrlr;
5055 	const int STRING_SIZE = 32;
5056 	const char *attached_names[STRING_SIZE];
5057 	struct nvme_bdev *bdev;
5058 	struct spdk_bdev_io *admin_io;
5059 	struct nvme_bdev_io *admin_bio;
5060 	struct spdk_io_channel *ch;
5061 	struct ut_nvme_req *req;
5062 	int rc;
5063 
5064 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5065 	ut_init_trid(&path.trid);
5066 
5067 	set_thread(0);
5068 
5069 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5070 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5071 
5072 	g_ut_attach_ctrlr_status = 0;
5073 	g_ut_attach_bdev_count = 1;
5074 
5075 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5076 			      attach_ctrlr_done, NULL, NULL, NULL, false);
5077 	CU_ASSERT(rc == 0);
5078 
5079 	spdk_delay_us(1000);
5080 	poll_threads();
5081 
5082 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5083 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5084 
5085 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5086 	CU_ASSERT(nvme_ctrlr != NULL);
5087 
5088 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5089 	CU_ASSERT(bdev != NULL);
5090 
5091 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
5092 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
5093 
5094 	admin_bio = (struct nvme_bdev_io *)admin_io->driver_ctx;
5095 
5096 	ch = spdk_get_io_channel(bdev);
5097 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5098 
5099 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
5100 
5101 	/* If admin passthrough is aborted by request, it should not be retried. */
5102 	g_opts.bdev_retry_count = 1;
5103 
5104 	admin_io->internal.in_submit_request = true;
5105 
5106 	bdev_nvme_submit_request(ch, admin_io);
5107 
5108 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5109 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5110 
5111 	req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio);
5112 	SPDK_CU_ASSERT_FATAL(req != NULL);
5113 
5114 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
5115 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5116 
5117 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5118 	poll_thread_times(0, 2);
5119 
5120 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5121 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5122 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
5123 
5124 	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
5125 	 * the failed admin passthrough should not be retried.
5126 	 */
5127 	g_opts.bdev_retry_count = 4;
5128 
5129 	admin_io->internal.in_submit_request = true;
5130 
5131 	bdev_nvme_submit_request(ch, admin_io);
5132 
5133 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5134 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5135 
5136 	req = ut_get_outstanding_nvme_request(&ctrlr->adminq, admin_bio);
5137 	SPDK_CU_ASSERT_FATAL(req != NULL);
5138 
5139 	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
5140 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
5141 	admin_bio->retry_count = 4;
5142 
5143 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5144 	poll_thread_times(0, 2);
5145 
5146 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5147 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5148 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
5149 
5150 	free(admin_io);
5151 
5152 	spdk_put_io_channel(ch);
5153 
5154 	poll_threads();
5155 
5156 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5157 	CU_ASSERT(rc == 0);
5158 
5159 	poll_threads();
5160 	spdk_delay_us(1000);
5161 	poll_threads();
5162 
5163 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5164 
5165 	g_opts.bdev_retry_count = 0;
5166 }
5167 
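/* Exercise bdev_nvme_check_multipath_params() with combinations of
 * ctrlr_loss_timeout_sec, reconnect_delay_sec and fast_io_fail_timeout_sec.
 * Judging from the expected results below, roughly: ctrlr_loss_timeout_sec must be
 * -1 or larger, reconnect_delay_sec must be non-zero exactly when
 * ctrlr_loss_timeout_sec is non-zero and must not exceed a positive
 * ctrlr_loss_timeout_sec, and a non-zero fast_io_fail_timeout_sec must be at
 * least reconnect_delay_sec and at most a positive ctrlr_loss_timeout_sec.
 */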
5168 static void
5169 test_check_multipath_params(void)
5170 {
5171 	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
5172 	 * 3rd parameter is fast_io_fail_timeout_sec.
5173 	 */
5174 	CU_ASSERT(bdev_nvme_check_multipath_params(-2, 1, 0) == false);
5175 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 0, 0) == false);
5176 	CU_ASSERT(bdev_nvme_check_multipath_params(1, 0, 0) == false);
5177 	CU_ASSERT(bdev_nvme_check_multipath_params(1, 2, 0) == false);
5178 	CU_ASSERT(bdev_nvme_check_multipath_params(0, 1, 0) == false);
5179 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 0) == true);
5180 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 2, 0) == true);
5181 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 0) == true);
5182 	CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, 0) == true);
5183 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, 0) == true);
5184 	CU_ASSERT(bdev_nvme_check_multipath_params(0, 0, 1) == false);
5185 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 2, 1) == false);
5186 	CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 4) == false);
5187 	CU_ASSERT(bdev_nvme_check_multipath_params(3, 2, 1) == false);
5188 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, 1, 1) == true);
5189 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 2) == true);
5190 	CU_ASSERT(bdev_nvme_check_multipath_params(2, 1, 1) == true);
5191 	CU_ASSERT(bdev_nvme_check_multipath_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
5192 	CU_ASSERT(bdev_nvme_check_multipath_params(-1, UINT32_MAX, UINT32_MAX) == true);
5193 }
5194 
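/* Verify that write I/Os submitted while the nvme_ctrlr is resetting are queued
 * on the nvme_bdev_channel's retry_io_list and are resubmitted in order after
 * the qpair is reconnected.
 */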
5195 static void
5196 test_retry_io_if_ctrlr_is_resetting(void)
5197 {
5198 	struct nvme_path_id path = {};
5199 	struct nvme_ctrlr_opts opts = {};
5200 	struct spdk_nvme_ctrlr *ctrlr;
5201 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5202 	struct nvme_ctrlr *nvme_ctrlr;
5203 	const int STRING_SIZE = 32;
5204 	const char *attached_names[STRING_SIZE];
5205 	struct nvme_bdev *bdev;
5206 	struct nvme_ns *nvme_ns;
5207 	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5208 	struct spdk_io_channel *ch;
5209 	struct nvme_bdev_channel *nbdev_ch;
5210 	struct nvme_io_path *io_path;
5211 	struct nvme_qpair *nvme_qpair;
5212 	int rc;
5213 
5214 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5215 	ut_init_trid(&path.trid);
5216 
5217 	set_thread(0);
5218 
5219 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5220 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5221 
5222 	g_ut_attach_ctrlr_status = 0;
5223 	g_ut_attach_bdev_count = 1;
5224 
5225 	opts.ctrlr_loss_timeout_sec = -1;
5226 	opts.reconnect_delay_sec = 1;
5227 
5228 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5229 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5230 	CU_ASSERT(rc == 0);
5231 
5232 	spdk_delay_us(1000);
5233 	poll_threads();
5234 
5235 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5236 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5237 
5238 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5239 	CU_ASSERT(nvme_ctrlr != NULL);
5240 
5241 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5242 	CU_ASSERT(bdev != NULL);
5243 
5244 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5245 	CU_ASSERT(nvme_ns != NULL);
5246 
5247 	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5248 	ut_bdev_io_set_buf(bdev_io1);
5249 
5250 	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5251 	ut_bdev_io_set_buf(bdev_io2);
5252 
5253 	ch = spdk_get_io_channel(bdev);
5254 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5255 
5256 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5257 
5258 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5259 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5260 
5261 	nvme_qpair = io_path->qpair;
5262 	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5263 	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5264 
5265 	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5266 	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5267 
5268 	/* If qpair is connected, I/O should succeed. */
5269 	bdev_io1->internal.in_submit_request = true;
5270 
5271 	bdev_nvme_submit_request(ch, bdev_io1);
5272 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5273 
5274 	poll_threads();
5275 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5276 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5277 
5278 	/* If qpair is disconnected, it is freed and then reconnected via resetting
5279 	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
5280 	 * while resetting the nvme_ctrlr.
5281 	 */
5282 	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5283 	ctrlr->is_failed = true;
5284 
5285 	poll_thread_times(0, 5);
5286 
5287 	CU_ASSERT(nvme_qpair->qpair == NULL);
5288 	CU_ASSERT(nvme_ctrlr->resetting == true);
5289 	CU_ASSERT(ctrlr->is_failed == false);
5290 
5291 	bdev_io1->internal.in_submit_request = true;
5292 
5293 	bdev_nvme_submit_request(ch, bdev_io1);
5294 
5295 	spdk_delay_us(1);
5296 
5297 	bdev_io2->internal.in_submit_request = true;
5298 
5299 	bdev_nvme_submit_request(ch, bdev_io2);
5300 
5301 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5302 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5303 	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5304 	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
5305 
5306 	poll_threads();
5307 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5308 	poll_threads();
5309 
5310 	CU_ASSERT(nvme_qpair->qpair != NULL);
5311 	CU_ASSERT(nvme_ctrlr->resetting == false);
5312 
5313 	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5314 
5315 	poll_thread_times(0, 1);
5316 
5317 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5318 	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5319 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5320 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5321 
5322 	poll_threads();
5323 
5324 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5325 	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5326 	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5327 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5328 	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5329 
5330 	spdk_delay_us(1);
5331 
5332 	poll_thread_times(0, 1);
5333 
5334 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5335 	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5336 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5337 
5338 	poll_threads();
5339 
5340 	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5341 	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
5342 	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5343 
5344 	free(bdev_io1);
5345 	free(bdev_io2);
5346 
5347 	spdk_put_io_channel(ch);
5348 
5349 	poll_threads();
5350 
5351 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5352 	CU_ASSERT(rc == 0);
5353 
5354 	poll_threads();
5355 	spdk_delay_us(1000);
5356 	poll_threads();
5357 
5358 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5359 }
5360 
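/* Verify that an admin passthrough request submitted while the ctrlr is
 * resetting is queued on the retry_io_list and completes successfully after
 * the reset finishes.
 */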
5361 static void
5362 test_retry_admin_passthru_if_ctrlr_is_resetting(void)
5363 {
5364 	struct nvme_path_id path = {};
5365 	struct nvme_ctrlr_opts opts = {};
5366 	struct spdk_nvme_ctrlr *ctrlr;
5367 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5368 	struct nvme_ctrlr *nvme_ctrlr;
5369 	const int STRING_SIZE = 32;
5370 	const char *attached_names[STRING_SIZE];
5371 	struct nvme_bdev *bdev;
5372 	struct spdk_bdev_io *admin_io;
5373 	struct spdk_io_channel *ch;
5374 	struct nvme_bdev_channel *nbdev_ch;
5375 	int rc;
5376 
5377 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5378 	ut_init_trid(&path.trid);
5379 
5380 	g_opts.bdev_retry_count = 1;
5381 
5382 	set_thread(0);
5383 
5384 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5385 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5386 
5387 	g_ut_attach_ctrlr_status = 0;
5388 	g_ut_attach_bdev_count = 1;
5389 
5390 	opts.ctrlr_loss_timeout_sec = -1;
5391 	opts.reconnect_delay_sec = 1;
5392 
5393 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5394 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5395 	CU_ASSERT(rc == 0);
5396 
5397 	spdk_delay_us(1000);
5398 	poll_threads();
5399 
5400 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5401 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5402 
5403 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5404 	CU_ASSERT(nvme_ctrlr != NULL);
5405 
5406 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5407 	CU_ASSERT(bdev != NULL);
5408 
5409 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
5410 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
5411 
5412 	ch = spdk_get_io_channel(bdev);
5413 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5414 
5415 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5416 
5417 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch;
5418 
5419 	/* If ctrlr is available, admin passthrough should succeed. */
5420 	admin_io->internal.in_submit_request = true;
5421 
5422 	bdev_nvme_submit_request(ch, admin_io);
5423 
5424 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5425 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5426 
5427 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5428 	poll_threads();
5429 
5430 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5431 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5432 
5433 	/* If an admin passthrough request is submitted while the ctrlr is resetting,
5434 	 * it should be queued and resubmitted after the reset completes.
5435 	 */
5436 	bdev_nvme_reset(nvme_ctrlr);
5437 
5438 	poll_thread_times(0, 1);
5439 
5440 	admin_io->internal.in_submit_request = true;
5441 
5442 	bdev_nvme_submit_request(ch, admin_io);
5443 
5444 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5445 	CU_ASSERT(admin_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5446 
5447 	poll_threads();
5448 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5449 	poll_threads();
5450 
5451 	CU_ASSERT(nvme_ctrlr->resetting == false);
5452 
5453 	spdk_delay_us(1000000 - g_opts.nvme_adminq_poll_period_us);
5454 	poll_thread_times(0, 1);
5455 
5456 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
5457 	CU_ASSERT(admin_io->internal.in_submit_request == true);
5458 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5459 
5460 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5461 	poll_threads();
5462 
5463 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
5464 	CU_ASSERT(admin_io->internal.in_submit_request == false);
5465 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5466 
5467 	free(admin_io);
5468 
5469 	spdk_put_io_channel(ch);
5470 
5471 	poll_threads();
5472 
5473 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5474 	CU_ASSERT(rc == 0);
5475 
5476 	poll_threads();
5477 	spdk_delay_us(1000);
5478 	poll_threads();
5479 
5480 	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5481 
5482 	g_opts.bdev_retry_count = 0;
5483 }
5484 
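/* Verify delayed reconnect after a failed reset: a failed reset arms the
 * reconnect_delay_timer, a later retry may succeed, and repeated failures
 * eventually trip ctrlr_loss_timeout_sec and the nvme_ctrlr is deleted.
 */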
5485 static void
5486 test_reconnect_ctrlr(void)
5487 {
5488 	struct spdk_nvme_transport_id trid = {};
5489 	struct spdk_nvme_ctrlr ctrlr = {};
5490 	struct nvme_ctrlr *nvme_ctrlr;
5491 	struct spdk_io_channel *ch1, *ch2;
5492 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5493 	int rc;
5494 
5495 	ut_init_trid(&trid);
5496 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5497 
5498 	set_thread(0);
5499 
5500 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5501 	CU_ASSERT(rc == 0);
5502 
5503 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5504 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5505 
5506 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5507 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5508 
5509 	ch1 = spdk_get_io_channel(nvme_ctrlr);
5510 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5511 
5512 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5513 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5514 
5515 	set_thread(1);
5516 
5517 	ch2 = spdk_get_io_channel(nvme_ctrlr);
5518 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5519 
5520 	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5521 
5522 	/* Reset starts from thread 1. */
5523 	set_thread(1);
5524 
5525 	/* The reset should fail and a reconnect timer should be registered. */
5526 	ctrlr.fail_reset = true;
5527 	ctrlr.is_failed = true;
5528 
5529 	rc = bdev_nvme_reset(nvme_ctrlr);
5530 	CU_ASSERT(rc == 0);
5531 	CU_ASSERT(nvme_ctrlr->resetting == true);
5532 	CU_ASSERT(ctrlr.is_failed == true);
5533 
5534 	poll_threads();
5535 
5536 	CU_ASSERT(nvme_ctrlr->resetting == false);
5537 	CU_ASSERT(ctrlr.is_failed == false);
5538 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5539 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5540 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5541 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5542 
5543 	/* Then a reconnect retry should succeed. */
5544 	ctrlr.fail_reset = false;
5545 
5546 	spdk_delay_us(SPDK_SEC_TO_USEC);
5547 	poll_thread_times(0, 1);
5548 
5549 	CU_ASSERT(nvme_ctrlr->resetting == true);
5550 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5551 
5552 	poll_threads();
5553 
5554 	CU_ASSERT(nvme_ctrlr->resetting == false);
5555 	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5556 	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5557 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5558 
5559 	/* The reset should fail and a reconnect timer should be registered. */
5560 	ctrlr.fail_reset = true;
5561 	ctrlr.is_failed = true;
5562 
5563 	rc = bdev_nvme_reset(nvme_ctrlr);
5564 	CU_ASSERT(rc == 0);
5565 	CU_ASSERT(nvme_ctrlr->resetting == true);
5566 	CU_ASSERT(ctrlr.is_failed == true);
5567 
5568 	poll_threads();
5569 
5570 	CU_ASSERT(nvme_ctrlr->resetting == false);
5571 	CU_ASSERT(ctrlr.is_failed == false);
5572 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5573 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5574 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5575 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5576 
5577 	/* Then a reconnect retry should still fail. */
5578 	spdk_delay_us(SPDK_SEC_TO_USEC);
5579 	poll_thread_times(0, 1);
5580 
5581 	CU_ASSERT(nvme_ctrlr->resetting == true);
5582 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5583 
5584 	poll_threads();
5585 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5586 	poll_threads();
5587 
5588 	CU_ASSERT(nvme_ctrlr->resetting == false);
5589 	CU_ASSERT(ctrlr.is_failed == false);
5590 	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5591 	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5592 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5593 
5594 	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5595 	spdk_delay_us(SPDK_SEC_TO_USEC);
5596 	poll_threads();
5597 
5598 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5599 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5600 	CU_ASSERT(nvme_ctrlr->destruct == true);
5601 
5602 	spdk_put_io_channel(ch2);
5603 
5604 	set_thread(0);
5605 
5606 	spdk_put_io_channel(ch1);
5607 
5608 	poll_threads();
5609 	spdk_delay_us(1000);
5610 	poll_threads();
5611 
5612 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5613 }
5614 
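/* Helper that looks up the nvme_path_id matching the given transport ID,
 * or returns NULL if no path with that trid is registered.
 */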
5615 static struct nvme_path_id *
5616 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5617 		       const struct spdk_nvme_transport_id *trid)
5618 {
5619 	struct nvme_path_id *p;
5620 
5621 	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5622 		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5623 			break;
5624 		}
5625 	}
5626 
5627 	return p;
5628 }
5629 
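/* Verify failover behavior while a reconnect is delayed: a failed reset marks
 * the active path failed and switches to the next trid, removing a trid during
 * the delay switches the active path again without starting a reset, and the
 * delayed reconnect then brings up the new active path.
 */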
5630 static void
5631 test_retry_failover_ctrlr(void)
5632 {
5633 	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5634 	struct spdk_nvme_ctrlr ctrlr = {};
5635 	struct nvme_ctrlr *nvme_ctrlr = NULL;
5636 	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5637 	struct spdk_io_channel *ch;
5638 	struct nvme_ctrlr_channel *ctrlr_ch;
5639 	int rc;
5640 
5641 	ut_init_trid(&trid1);
5642 	ut_init_trid2(&trid2);
5643 	ut_init_trid3(&trid3);
5644 	TAILQ_INIT(&ctrlr.active_io_qpairs);
5645 
5646 	set_thread(0);
5647 
5648 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5649 	CU_ASSERT(rc == 0);
5650 
5651 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5652 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5653 
5654 	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5655 	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5656 
5657 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5658 	CU_ASSERT(rc == 0);
5659 
5660 	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5661 	CU_ASSERT(rc == 0);
5662 
5663 	ch = spdk_get_io_channel(nvme_ctrlr);
5664 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5665 
5666 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5667 
5668 	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5669 	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5670 	CU_ASSERT(path_id1->is_failed == false);
5671 	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5672 
5673 	/* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */
5674 	ctrlr.fail_reset = true;
5675 	ctrlr.is_failed = true;
5676 
5677 	rc = bdev_nvme_reset(nvme_ctrlr);
5678 	CU_ASSERT(rc == 0);
5679 
5680 	poll_threads();
5681 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5682 	poll_threads();
5683 
5684 	CU_ASSERT(nvme_ctrlr->resetting == false);
5685 	CU_ASSERT(ctrlr.is_failed == false);
5686 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5687 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5688 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5689 	CU_ASSERT(path_id1->is_failed == true);
5690 
5691 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5692 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5693 
5694 	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5695 	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5696 	CU_ASSERT(path_id2->is_failed == false);
5697 	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5698 
5699 	/* If trid2 is removed while a reconnect is scheduled, the active path_id is
5700 	 * switched to trid3 but a reset is not started.
5701 	 */
5702 	rc = bdev_nvme_failover(nvme_ctrlr, true);
5703 	CU_ASSERT(rc == 0);
5704 
5705 	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) == NULL);
5706 
5707 	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5708 	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5709 	CU_ASSERT(path_id3->is_failed == false);
5710 	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5711 
5712 	CU_ASSERT(nvme_ctrlr->resetting == false);
5713 
5714 	/* If reconnect succeeds, trid3 should be the active path_id */
5715 	ctrlr.fail_reset = false;
5716 
5717 	spdk_delay_us(SPDK_SEC_TO_USEC);
5718 	poll_thread_times(0, 1);
5719 
5720 	CU_ASSERT(nvme_ctrlr->resetting == true);
5721 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5722 
5723 	poll_threads();
5724 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5725 	poll_threads();
5726 
5727 	CU_ASSERT(path_id3->is_failed == false);
5728 	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5729 	CU_ASSERT(nvme_ctrlr->resetting == false);
5730 	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5731 	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5732 
5733 	spdk_put_io_channel(ch);
5734 
5735 	poll_threads();
5736 
5737 	rc = bdev_nvme_delete("nvme0", &g_any_path);
5738 	CU_ASSERT(rc == 0);
5739 
5740 	poll_threads();
5741 	spdk_delay_us(1000);
5742 	poll_threads();
5743 
5744 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5745 }
5746 
5747 static void
5748 test_fail_path(void)
5749 {
5750 	struct nvme_path_id path = {};
5751 	struct nvme_ctrlr_opts opts = {};
5752 	struct spdk_nvme_ctrlr *ctrlr;
5753 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5754 	struct nvme_ctrlr *nvme_ctrlr;
5755 	const int STRING_SIZE = 32;
5756 	const char *attached_names[STRING_SIZE];
5757 	struct nvme_bdev *bdev;
5758 	struct nvme_ns *nvme_ns;
5759 	struct spdk_bdev_io *bdev_io;
5760 	struct spdk_io_channel *ch;
5761 	struct nvme_bdev_channel *nbdev_ch;
5762 	struct nvme_io_path *io_path;
5763 	struct nvme_ctrlr_channel *ctrlr_ch;
5764 	int rc;
5765 
5766 	/* The test scenario is the following.
5767 	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5768 	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
5769 	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5770 	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
5771 	 *   expires first. The queued I/O is failed.
5772 	 * - After fast_io_fail_timeout_sec expires, any new I/O is failed immediately.
5773 	 * - Then ctrlr_loss_timeout_sec expires and the ctrlr is deleted.
5774 	 */
5775 
5776 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5777 	ut_init_trid(&path.trid);
5778 
5779 	set_thread(0);
5780 
5781 	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5782 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5783 
5784 	g_ut_attach_ctrlr_status = 0;
5785 	g_ut_attach_bdev_count = 1;
5786 
5787 	opts.ctrlr_loss_timeout_sec = 4;
5788 	opts.reconnect_delay_sec = 1;
5789 	opts.fast_io_fail_timeout_sec = 2;
5790 
5791 	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5792 			      attach_ctrlr_done, NULL, NULL, &opts, false);
5793 	CU_ASSERT(rc == 0);
5794 
5795 	spdk_delay_us(1000);
5796 	poll_threads();
5797 
5798 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5799 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5800 
5801 	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5802 	CU_ASSERT(nvme_ctrlr != NULL);
5803 
5804 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5805 	CU_ASSERT(bdev != NULL);
5806 
5807 	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5808 	CU_ASSERT(nvme_ns != NULL);
5809 
5810 	ch = spdk_get_io_channel(bdev);
5811 	SPDK_CU_ASSERT_FATAL(ch != NULL);
5812 
5813 	nbdev_ch = spdk_io_channel_get_ctx(ch);
5814 
5815 	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5816 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5817 
5818 	ctrlr_ch = io_path->qpair->ctrlr_ch;
5819 	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5820 	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
5821 
5822 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
5823 	ut_bdev_io_set_buf(bdev_io);
5824 
5825 
5826 	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
5827 	ctrlr->fail_reset = true;
5828 	ctrlr->is_failed = true;
5829 
5830 	rc = bdev_nvme_reset(nvme_ctrlr);
5831 	CU_ASSERT(rc == 0);
5832 	CU_ASSERT(nvme_ctrlr->resetting == true);
5833 	CU_ASSERT(ctrlr->is_failed == true);
5834 
5835 	poll_threads();
5836 
5837 	CU_ASSERT(nvme_ctrlr->resetting == false);
5838 	CU_ASSERT(ctrlr->is_failed == false);
5839 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5840 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5841 	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
5842 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5843 
5844 	/* I/O should be queued. */
5845 	bdev_io->internal.in_submit_request = true;
5846 
5847 	bdev_nvme_submit_request(ch, bdev_io);
5848 
5849 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5850 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5851 
5852 	/* After a second, the I/O should still be queued and the ctrlr should still
5853 	 * be recovering.
5854 	 */
5855 	spdk_delay_us(SPDK_SEC_TO_USEC);
5856 	poll_threads();
5857 
5858 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5859 	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5860 
5861 	CU_ASSERT(nvme_ctrlr->resetting == false);
5862 	CU_ASSERT(ctrlr->is_failed == false);
5863 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5864 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5865 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5866 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5867 
5868 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5869 
5870 	/* After two seconds, fast_io_fail_timeout_sec should expire. */
5871 	spdk_delay_us(SPDK_SEC_TO_USEC);
5872 	poll_threads();
5873 
5874 	CU_ASSERT(nvme_ctrlr->resetting == false);
5875 	CU_ASSERT(ctrlr->is_failed == false);
5876 	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5877 	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5878 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5879 	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
5880 
5881 	/* Then within a second, pending I/O should be failed. */
5882 	spdk_delay_us(SPDK_SEC_TO_USEC);
5883 	poll_threads();
5884 
5885 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5886 	poll_threads();
5887 
5888 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5889 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5890 	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5891 
5892 	/* Another I/O submission should be failed immediately. */
5893 	bdev_io->internal.in_submit_request = true;
5894 
5895 	bdev_nvme_submit_request(ch, bdev_io);
5896 
5897 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5898 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5899 
5900 	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr should
5901 	 * be deleted.
5902 	 */
5903 	spdk_delay_us(SPDK_SEC_TO_USEC);
5904 	poll_threads();
5905 
5906 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5907 	poll_threads();
5908 
5909 	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5910 	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5911 	CU_ASSERT(nvme_ctrlr->destruct == true);
5912 
5913 	spdk_put_io_channel(ch);
5914 
5915 	poll_threads();
5916 	spdk_delay_us(1000);
5917 	poll_threads();
5918 
5919 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5920 
5921 	free(bdev_io);
5922 }
5923 
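/* Verify that nvme_ns_cmp() orders namespaces by namespace ID. */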
5924 static void
5925 test_nvme_ns_cmp(void)
5926 {
5927 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
5928 
5929 	nvme_ns1.id = 0;
5930 	nvme_ns2.id = UINT32_MAX;
5931 
5932 	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
5933 	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
5934 }
5935 
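/* Verify ANATT timer handling in _nvme_ns_set_ana_state() for transitions
 * into and out of the ANA change state.
 */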
5936 static void
5937 test_ana_transition(void)
5938 {
5939 	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
5940 	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
5941 	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
5942 	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
5943 
5944 	/* case 1: A previously recorded ANA transition timeout is cleared. */
5945 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5946 	nvme_ns.ana_transition_timedout = true;
5947 
5948 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5949 
5950 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5951 
5952 	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
5953 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5954 
5955 	/* case 2: ANATT timer is kept. */
5956 	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5957 	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
5958 			      &nvme_ns,
5959 			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5960 
5961 	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5962 
5963 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5964 
5965 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5966 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
5967 
5968 	/* case 3: ANATT timer is stopped. */
5969 	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5970 
5971 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5972 
5973 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5974 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5975 
5976 	/* ANATT timer is started. */
5977 	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5978 
5979 	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5980 
5981 	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5982 	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
5983 
5984 	/* ANATT timer is expired. */
5985 	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5986 
5987 	poll_threads();
5988 
5989 	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5990 	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
5991 }
5992 
5993 static void
5994 _set_preferred_path_cb(void *cb_arg, int rc)
5995 {
5996 	bool *done = cb_arg;
5997 
5998 	*done = true;
5999 }
6000 
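/* Verify bdev_nvme_set_preferred_path(): the preferred io_path takes effect
 * immediately on an existing I/O channel and also on channels acquired later.
 */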
6001 static void
6002 test_set_preferred_path(void)
6003 {
6004 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
6005 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
6006 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6007 	const int STRING_SIZE = 32;
6008 	const char *attached_names[STRING_SIZE];
6009 	struct nvme_bdev *bdev;
6010 	struct spdk_io_channel *ch;
6011 	struct nvme_bdev_channel *nbdev_ch;
6012 	struct nvme_io_path *io_path;
6013 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6014 	const struct spdk_nvme_ctrlr_data *cdata;
6015 	bool done;
6016 	int rc;
6017 
6018 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6019 	ut_init_trid(&path1.trid);
6020 	ut_init_trid2(&path2.trid);
6021 	ut_init_trid3(&path3.trid);
6022 	g_ut_attach_ctrlr_status = 0;
6023 	g_ut_attach_bdev_count = 1;
6024 
6025 	set_thread(0);
6026 
6027 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6028 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6029 
6030 	ctrlr1->ns[0].uuid = &uuid1;
6031 
6032 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6033 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6034 	CU_ASSERT(rc == 0);
6035 
6036 	spdk_delay_us(1000);
6037 	poll_threads();
6038 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6039 	poll_threads();
6040 
6041 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6042 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6043 
6044 	ctrlr2->ns[0].uuid = &uuid1;
6045 
6046 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6047 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6048 	CU_ASSERT(rc == 0);
6049 
6050 	spdk_delay_us(1000);
6051 	poll_threads();
6052 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6053 	poll_threads();
6054 
6055 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
6056 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
6057 
6058 	ctrlr3->ns[0].uuid = &uuid1;
6059 
6060 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
6061 			      attach_ctrlr_done, NULL, NULL, NULL, true);
6062 	CU_ASSERT(rc == 0);
6063 
6064 	spdk_delay_us(1000);
6065 	poll_threads();
6066 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6067 	poll_threads();
6068 
6069 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6070 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6071 
6072 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6073 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6074 
6075 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6076 
6077 	ch = spdk_get_io_channel(bdev);
6078 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6079 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6080 
6081 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6082 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6083 
6084 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6085 
6086 	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
6087 	 * should return io_path to ctrlr2.
6088 	 */
6089 
6090 	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
6091 	done = false;
6092 
6093 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6094 
6095 	poll_threads();
6096 	CU_ASSERT(done == true);
6097 
6098 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6099 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6100 
6101 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6102 
6103 	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
6104 	 * acquired, find_io_path() should return io_path to ctrlr3.
6105 	 */
6106 
6107 	spdk_put_io_channel(ch);
6108 
6109 	poll_threads();
6110 
6111 	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
6112 	done = false;
6113 
6114 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6115 
6116 	poll_threads();
6117 	CU_ASSERT(done == true);
6118 
6119 	ch = spdk_get_io_channel(bdev);
6120 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6121 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6122 
6123 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6124 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6125 
6126 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
6127 
6128 	spdk_put_io_channel(ch);
6129 
6130 	poll_threads();
6131 
6132 	rc = bdev_nvme_delete("nvme0", &g_any_path);
6133 	CU_ASSERT(rc == 0);
6134 
6135 	poll_threads();
6136 	spdk_delay_us(1000);
6137 	poll_threads();
6138 
6139 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6140 }
6141 
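/* Verify path selection under the active-active policy when current_io_path is
 * already set: an optimized path is preferred and selection resumes from the
 * path following the current one.
 */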
6142 static void
6143 test_find_next_io_path(void)
6144 {
6145 	struct nvme_bdev_channel nbdev_ch = {
6146 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
6147 		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6148 	};
6149 	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
6150 	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
6151 	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
6152 	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
6153 	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
6154 	struct nvme_ctrlr_channel ctrlr_ch1 = {};
6155 	struct nvme_ctrlr_channel ctrlr_ch2 = {};
6156 	struct nvme_ctrlr_channel ctrlr_ch3 = {};
6157 	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
6158 	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
6159 	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
6160 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
6161 	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
6162 	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
6163 	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
6164 
6165 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
6166 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
6167 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
6168 
6169 	/* nbdev_ch->current_io_path is always set when bdev_nvme_find_next_io_path() is called. */
6170 
6171 	nbdev_ch.current_io_path = &io_path2;
6172 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6173 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6174 	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6175 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6176 
6177 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6178 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6179 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6180 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6181 
6182 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6183 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
6184 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6185 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
6186 
6187 	nbdev_ch.current_io_path = &io_path3;
6188 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
6189 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6190 	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
6191 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
6192 }
6193 
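/* Verify that with disable_auto_failback set, I/O stays on the failover path
 * after the original ctrlr recovers, and only an explicit
 * bdev_nvme_set_preferred_path() call moves I/O back to it.
 */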
6194 static void
6195 test_disable_auto_failback(void)
6196 {
6197 	struct nvme_path_id path1 = {}, path2 = {};
6198 	struct nvme_ctrlr_opts opts = {};
6199 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6200 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6201 	struct nvme_ctrlr *nvme_ctrlr1;
6202 	const int STRING_SIZE = 32;
6203 	const char *attached_names[STRING_SIZE];
6204 	struct nvme_bdev *bdev;
6205 	struct spdk_io_channel *ch;
6206 	struct nvme_bdev_channel *nbdev_ch;
6207 	struct nvme_io_path *io_path;
6208 	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6209 	const struct spdk_nvme_ctrlr_data *cdata;
6210 	bool done;
6211 	int rc;
6212 
6213 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6214 	ut_init_trid(&path1.trid);
6215 	ut_init_trid2(&path2.trid);
6216 	g_ut_attach_ctrlr_status = 0;
6217 	g_ut_attach_bdev_count = 1;
6218 
6219 	g_opts.disable_auto_failback = true;
6220 
6221 	opts.ctrlr_loss_timeout_sec = -1;
6222 	opts.reconnect_delay_sec = 1;
6223 
6224 	set_thread(0);
6225 
6226 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6227 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6228 
6229 	ctrlr1->ns[0].uuid = &uuid1;
6230 
6231 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6232 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6233 	CU_ASSERT(rc == 0);
6234 
6235 	spdk_delay_us(1000);
6236 	poll_threads();
6237 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6238 	poll_threads();
6239 
6240 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6241 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6242 
6243 	ctrlr2->ns[0].uuid = &uuid1;
6244 
6245 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6246 			      attach_ctrlr_done, NULL, NULL, &opts, true);
6247 	CU_ASSERT(rc == 0);
6248 
6249 	spdk_delay_us(1000);
6250 	poll_threads();
6251 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6252 	poll_threads();
6253 
6254 	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6255 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6256 
6257 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6258 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6259 
6260 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
6261 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
6262 
6263 	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
6264 
6265 	ch = spdk_get_io_channel(bdev);
6266 	SPDK_CU_ASSERT_FATAL(ch != NULL);
6267 	nbdev_ch = spdk_io_channel_get_ctx(ch);
6268 
6269 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6270 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6271 
6272 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6273 
6274 	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
6275 	ctrlr1->fail_reset = true;
6276 	ctrlr1->is_failed = true;
6277 
6278 	bdev_nvme_reset(nvme_ctrlr1);
6279 
6280 	poll_threads();
6281 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6282 	poll_threads();
6283 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6284 	poll_threads();
6285 
6286 	CU_ASSERT(ctrlr1->adminq.is_connected == false);
6287 
6288 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6289 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6290 
6291 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6292 
6293 	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
6294 	 * Hence, io_path to ctrlr2 should still be used.
6295 	 */
6296 	ctrlr1->fail_reset = false;
6297 
6298 	spdk_delay_us(SPDK_SEC_TO_USEC);
6299 	poll_threads();
6300 
6301 	CU_ASSERT(ctrlr1->adminq.is_connected == true);
6302 
6303 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6304 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6305 
6306 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
6307 
6308 	/* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should
6309 	 * be used again.
6310 	 */
6311 
6312 	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
6313 	done = false;
6314 
6315 	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
6316 
6317 	poll_threads();
6318 	CU_ASSERT(done == true);
6319 
6320 	io_path = bdev_nvme_find_io_path(nbdev_ch);
6321 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6322 
6323 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6324 
6325 	spdk_put_io_channel(ch);
6326 
6327 	poll_threads();
6328 
6329 	rc = bdev_nvme_delete("nvme0", &g_any_path);
6330 	CU_ASSERT(rc == 0);
6331 
6332 	poll_threads();
6333 	spdk_delay_us(1000);
6334 	poll_threads();
6335 
6336 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6337 
6338 	g_opts.disable_auto_failback = false;
6339 }
6340 
6341 int
6342 main(int argc, const char **argv)
6343 {
6344 	CU_pSuite	suite = NULL;
6345 	unsigned int	num_failures;
6346 
6347 	CU_set_error_action(CUEA_ABORT);
6348 	CU_initialize_registry();
6349 
6350 	suite = CU_add_suite("nvme", NULL, NULL);
6351 
6352 	CU_ADD_TEST(suite, test_create_ctrlr);
6353 	CU_ADD_TEST(suite, test_reset_ctrlr);
6354 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
6355 	CU_ADD_TEST(suite, test_failover_ctrlr);
6356 	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
6357 	CU_ADD_TEST(suite, test_pending_reset);
6358 	CU_ADD_TEST(suite, test_attach_ctrlr);
6359 	CU_ADD_TEST(suite, test_aer_cb);
6360 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
6361 	CU_ADD_TEST(suite, test_add_remove_trid);
6362 	CU_ADD_TEST(suite, test_abort);
6363 	CU_ADD_TEST(suite, test_get_io_qpair);
6364 	CU_ADD_TEST(suite, test_bdev_unregister);
6365 	CU_ADD_TEST(suite, test_compare_ns);
6366 	CU_ADD_TEST(suite, test_init_ana_log_page);
6367 	CU_ADD_TEST(suite, test_get_memory_domains);
6368 	CU_ADD_TEST(suite, test_reconnect_qpair);
6369 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
6370 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
6371 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
6372 	CU_ADD_TEST(suite, test_admin_path);
6373 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
6374 	CU_ADD_TEST(suite, test_find_io_path);
6375 	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
6376 	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
6377 	CU_ADD_TEST(suite, test_retry_io_count);
6378 	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
6379 	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
6380 	CU_ADD_TEST(suite, test_retry_admin_passthru_for_path_error);
6381 	CU_ADD_TEST(suite, test_retry_admin_passthru_by_count);
6382 	CU_ADD_TEST(suite, test_check_multipath_params);
6383 	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
6384 	CU_ADD_TEST(suite, test_retry_admin_passthru_if_ctrlr_is_resetting);
6385 	CU_ADD_TEST(suite, test_reconnect_ctrlr);
6386 	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
6387 	CU_ADD_TEST(suite, test_fail_path);
6388 	CU_ADD_TEST(suite, test_nvme_ns_cmp);
6389 	CU_ADD_TEST(suite, test_ana_transition);
6390 	CU_ADD_TEST(suite, test_set_preferred_path);
6391 	CU_ADD_TEST(suite, test_find_next_io_path);
6392 	CU_ADD_TEST(suite, test_disable_auto_failback);
6393 
6394 	CU_basic_set_mode(CU_BRM_VERBOSE);
6395 
6396 	allocate_threads(3);
6397 	set_thread(0);
6398 	bdev_nvme_library_init();
6399 	init_accel();
6400 
6401 	CU_basic_run_tests();
6402 
6403 	set_thread(0);
6404 	bdev_nvme_library_fini();
6405 	fini_accel();
6406 	free_threads();
6407 
6408 	num_failures = CU_get_number_of_failures();
6409 	CU_cleanup_registry();
6410 
6411 	return num_failures;
6412 }
6413