/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

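/*
 * Stub out the parts of the NVMe driver, accel framework, OPAL, and bdev
 * module APIs that bdev_nvme.c links against but that these tests do not
 * exercise. DEFINE_STUB() provides a function returning a fixed value and
 * DEFINE_STUB_V() a void function; g_accel_p is a fake accel I/O device.
 */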
static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, int);

int spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				       struct spdk_memory_domain **domains, int array_size)
{
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain);

	return 0;
}

struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
	    (const struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));

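/*
 * Minimal mock definitions of the NVMe driver's opaque structures. Requests
 * submitted to a mock qpair are queued on outstanding_reqs and completed
 * later when spdk_nvme_qpair_process_completions() is called.
 */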
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		uuid;
	enum spdk_nvme_ana_state	ana_state;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	bool				is_connected;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

struct spdk_nvme_ctrlr_reset_ctx {
	struct spdk_nvme_ctrlr		*ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

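/*
 * Controllers created by ut_attach_ctrlr() wait on g_ut_init_ctrlrs until
 * probe polling moves them to g_ut_attached_ctrlrs. The remaining globals
 * let each test dictate the expected attach and bdev registration results.
 */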
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

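/* Fill in three distinct TCP transport IDs that share the same subsystem NQN. */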
static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

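/*
 * Create a mock controller with num_ns active namespaces of 1024 blocks each
 * and queue it for probe polling. Fails if a controller with the same trid
 * already exists.
 */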
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multi_ctrlr)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multi_ctrlr;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

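/*
 * Queue a request on the mock qpair with a successful completion prepared.
 * The callback fires when the qpair's completions are processed.
 */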
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = qpair->ctrlr;

	if (ctrlr->is_failed) {
		return -ENXIO;
	}
	qpair->is_connected = true;

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	qpair->is_connected = false;
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reset_poll_async(struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr = ctrlr_reset_ctx->ctrlr;

	free(ctrlr_reset_ctx);

	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	return 0;
}

int
spdk_nvme_ctrlr_reset_async(struct spdk_nvme_ctrlr *ctrlr,
			    struct spdk_nvme_ctrlr_reset_ctx **reset_ctx)
{
	struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx;

	ctrlr_reset_ctx = calloc(1, sizeof(*ctrlr_reset_ctx));
	if (!ctrlr_reset_ctx) {
		return -ENOMEM;
	}

	ctrlr->is_failed = false;

	ctrlr_reset_ctx->ctrlr = ctrlr;
	*reset_ctx = ctrlr_reset_ctx;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

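/*
 * Build an ANA log page for the mock controller: a header followed by one
 * single-namespace group descriptor per active namespace.
 */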
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return &ns->uuid;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->is_connected) {
			local_completions = spdk_nvme_qpair_process_completions(qpair,
					    completions_per_qpair);
			if (local_completions < 0 && error_reason == 0) {
				error_reason = local_completions;
			} else {
				num_completions += local_completions;
				assert(num_completions >= 0);
			}
		}
	}

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
		if (!qpair->is_connected) {
			disconnected_qpair_cb(qpair, group->ctx);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);
	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

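/*
 * Verify that nvme_ctrlr_create() registers a named ctrlr and that deletion
 * is asynchronous: the ctrlr remains findable until the threads are polled.
 */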
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

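/*
 * Verify bdev_nvme_reset(): it is rejected while the ctrlr is being
 * destructed or already resetting, and a successful reset disconnects and
 * reconnects the qpair of each I/O channel, one thread at a time.
 */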
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr.is_failed == false);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

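/*
 * Verify that deleting a ctrlr during a reset defers the destruct until the
 * reset completes and all I/O channels are released.
 */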
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while it is being reset; the destruct is deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();

	/* Reset completed but ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling called spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so the destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

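/*
 * Verify bdev_nvme_failover() with a single trid (which behaves like a
 * reset) and with two trids (which switches the active path to the
 * secondary trid).
 */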
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 3: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 5: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 6: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 7: failover completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

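/* Attach callback passed to bdev_nvme_create(); checks the expected result. */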
static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}

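/*
 * Verify that a reset submitted while another reset is in progress is queued
 * on pending_resets and completes (or fails) together with the first one.
 */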
static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
	ctrlr_ch1 = io_path1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
	ctrlr_ch2 = io_path2->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the reset fails while
	 * processing the first request. Hence both reset requests should fail.
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	ctrlr->fail_reset = true;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

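/*
 * Walk bdev_nvme_create() through four attach outcomes: a failed ctrlr, a
 * ctrlr without namespaces, a ctrlr with one namespace and one bdev, and a
 * ctrlr whose bdev registration fails.
 */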
static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is removed
	 * during probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If the ctrlr has no namespaces, one nvme_ctrlr with no namespaces is created. */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If the ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespaces is
	 * created because creating the nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
			      attach_ctrlr_done, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	g_ut_register_bdev_status = 0;
}

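/*
 * Verify that the AER callback picks up namespace attribute changes
 * (populate, depopulate, resize) and ANA state changes.
 */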
1816 static void
1817 test_aer_cb(void)
1818 {
1819 	struct spdk_nvme_transport_id trid = {};
1820 	struct spdk_nvme_ctrlr *ctrlr;
1821 	struct nvme_ctrlr *nvme_ctrlr;
1822 	struct nvme_bdev *bdev;
1823 	const int STRING_SIZE = 32;
1824 	const char *attached_names[STRING_SIZE];
1825 	union spdk_nvme_async_event_completion event = {};
1826 	struct spdk_nvme_cpl cpl = {};
1827 	int rc;
1828 
1829 	set_thread(0);
1830 
1831 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1832 	ut_init_trid(&trid);
1833 
1834 	/* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th
1835 	 * namespaces are populated.
1836 	 */
1837 	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
1838 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1839 
1840 	ctrlr->ns[0].is_active = false;
1841 
1842 	g_ut_attach_ctrlr_status = 0;
1843 	g_ut_attach_bdev_count = 3;
1844 
1845 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
1846 			      attach_ctrlr_done, NULL, NULL, false);
1847 	CU_ASSERT(rc == 0);
1848 
1849 	spdk_delay_us(1000);
1850 	poll_threads();
1851 
1852 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1853 	poll_threads();
1854 
1855 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1856 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1857 
1858 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
1859 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
1860 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
1861 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
1862 
1863 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
1864 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1865 	CU_ASSERT(bdev->disk.blockcnt == 1024);
1866 
1867 	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace, and
1868 	 * change the size of the 4th namespace.
1869 	 */
1870 	ctrlr->ns[0].is_active = true;
1871 	ctrlr->ns[2].is_active = false;
1872 	ctrlr->nsdata[3].nsze = 2048;
1873 
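	/* Simulate a Namespace Attribute Changed async event. */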
1874 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
1875 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
1876 	cpl.cdw0 = event.raw;
1877 
1878 	aer_cb(nvme_ctrlr, &cpl);
1879 
1880 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
1881 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
1882 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
1883 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
1884 	CU_ASSERT(bdev->disk.blockcnt == 2048);
1885 
1886 	/* Change ANA state of active namespaces. */
1887 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
1888 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
1889 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
1890 
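	/* Simulate an ANA Change async event. */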
1891 	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
1892 	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
1893 	cpl.cdw0 = event.raw;
1894 
1895 	aer_cb(nvme_ctrlr, &cpl);
1896 
1897 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1898 	poll_threads();
1899 
1900 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
1901 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
1902 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
1903 
1904 	rc = bdev_nvme_delete("nvme0", &g_any_path);
1905 	CU_ASSERT(rc == 0);
1906 
1907 	poll_threads();
1908 	spdk_delay_us(1000);
1909 	poll_threads();
1910 
1911 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1912 }
1913 
1914 static void
1915 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
1916 			enum spdk_bdev_io_type io_type)
1917 {
1918 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
1919 	struct nvme_io_path *io_path;
1920 	struct spdk_nvme_qpair *qpair;
1921 
1922 	io_path = bdev_nvme_find_io_path(nbdev_ch);
1923 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
1924 	qpair = io_path->ctrlr_ch->qpair;
1925 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
1926 
1927 	bdev_io->type = io_type;
1928 	bdev_io->internal.in_submit_request = true;
1929 
1930 	bdev_nvme_submit_request(ch, bdev_io);
1931 
1932 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
1933 	CU_ASSERT(qpair->num_outstanding_reqs == 1);
1934 
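	/* Polling the threads completes the outstanding request on the qpair. */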
1935 	poll_threads();
1936 
1937 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
1938 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1939 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
1940 }
1941 
1942 static void
1943 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
1944 		   enum spdk_bdev_io_type io_type)
1945 {
1946 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
1947 	struct nvme_io_path *io_path;
1948 	struct spdk_nvme_qpair *qpair;
1949 
1950 	io_path = bdev_nvme_find_io_path(nbdev_ch);
1951 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
1952 	qpair = io_path->ctrlr_ch->qpair;
1953 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
1954 
1955 	bdev_io->type = io_type;
1956 	bdev_io->internal.in_submit_request = true;
1957 
1958 	bdev_nvme_submit_request(ch, bdev_io);
1959 
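	/* The no-op is completed inline; nothing is left outstanding on the qpair. */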
1960 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
1961 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1962 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
1963 }
1964 
1965 static void
1966 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
1967 {
1968 	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
1969 	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
1970 	struct ut_nvme_req *req;
1971 	struct nvme_io_path *io_path;
1972 	struct spdk_nvme_qpair *qpair;
1973 
1974 	io_path = bdev_nvme_find_io_path(nbdev_ch);
1975 	SPDK_CU_ASSERT_FATAL(io_path != NULL);
1976 	qpair = io_path->ctrlr_ch->qpair;
1977 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
1978 
1979 	/* Only the fused compare-and-write operation is supported for now. */
1980 	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
1981 	bdev_io->internal.in_submit_request = true;
1982 
1983 	bdev_nvme_submit_request(ch, bdev_io);
1984 
1985 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
1986 	CU_ASSERT(qpair->num_outstanding_reqs == 2);
1987 	CU_ASSERT(bio->first_fused_submitted == true);
1988 
1989 	/* First outstanding request is compare operation. */
1990 	req = TAILQ_FIRST(&qpair->outstanding_reqs);
1991 	SPDK_CU_ASSERT_FATAL(req != NULL);
1992 	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
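	/* cdw0 is used by the fused-command completion callback to distinguish
	 * the compare completion from the write completion.
	 */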
1993 	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
1994 
1995 	poll_threads();
1996 
1997 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
1998 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1999 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2000 }
2001 
2002 static void
2003 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2004 			 struct spdk_nvme_ctrlr *ctrlr)
2005 {
2006 	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2007 	bdev_io->internal.in_submit_request = true;
2008 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2009 
2010 	bdev_nvme_submit_request(ch, bdev_io);
2011 
2012 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2013 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2014 
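	/* The admin request is processed on thread 1, where the ctrlr was created,
	 * and the completion is then delivered back to thread 0, which submitted it.
	 */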
2015 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2016 	poll_thread_times(1, 1);
2017 
2018 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2019 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2020 
2021 	poll_thread_times(0, 1);
2022 
2023 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2024 }
2025 
2026 static void
2027 test_submit_nvme_cmd(void)
2028 {
2029 	struct spdk_nvme_transport_id trid = {};
2030 	struct spdk_nvme_ctrlr *ctrlr;
2031 	struct nvme_ctrlr *nvme_ctrlr;
2032 	const int STRING_SIZE = 32;
2033 	const char *attached_names[STRING_SIZE];
2034 	struct nvme_bdev *bdev;
2035 	struct spdk_bdev_io *bdev_io;
2036 	struct spdk_io_channel *ch;
2037 	struct spdk_bdev_ext_io_opts ext_io_opts = {};
2038 	int rc;
2039 
2040 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2041 	ut_init_trid(&trid);
2042 
2043 	set_thread(1);
2044 
2045 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2046 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2047 
2048 	g_ut_attach_ctrlr_status = 0;
2049 	g_ut_attach_bdev_count = 1;
2050 
2051 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2052 			      attach_ctrlr_done, NULL, NULL, false);
2053 	CU_ASSERT(rc == 0);
2054 
2055 	spdk_delay_us(1000);
2056 	poll_threads();
2057 
2058 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2059 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2060 
2061 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2062 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2063 
2064 	set_thread(0);
2065 
2066 	ch = spdk_get_io_channel(bdev);
2067 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2068 
2069 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2070 
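	/* A read submitted without a buffer must first go through the buffer
	 * allocation path before it reaches the qpair.
	 */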
2071 	bdev_io->u.bdev.iovs = NULL;
2072 
2073 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2074 
2075 	ut_bdev_io_set_buf(bdev_io);
2076 
2077 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2078 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2079 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2080 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2081 
2082 	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2083 
2084 	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2085 
2086 	/* Verify that ext NVME API is called if bdev_io ext_opts is set */
2087 	bdev_io->internal.ext_opts = &ext_io_opts;
2088 	g_ut_readv_ext_called = false;
2089 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2090 	CU_ASSERT(g_ut_readv_ext_called == true);
2091 	g_ut_readv_ext_called = false;
2092 
2093 	g_ut_writev_ext_called = false;
2094 	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2095 	CU_ASSERT(g_ut_writev_ext_called == true);
2096 	g_ut_writev_ext_called = false;
2097 	bdev_io->internal.ext_opts = NULL;
2098 
2099 	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2100 
2101 	free(bdev_io);
2102 
2103 	spdk_put_io_channel(ch);
2104 
2105 	poll_threads();
2106 
2107 	set_thread(1);
2108 
2109 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2110 	CU_ASSERT(rc == 0);
2111 
2112 	poll_threads();
2113 	spdk_delay_us(1000);
2114 	poll_threads();
2115 
2116 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2117 }
2118 
2119 static void
2120 test_add_remove_trid(void)
2121 {
2122 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2123 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2124 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2125 	const int STRING_SIZE = 32;
2126 	const char *attached_names[STRING_SIZE];
2127 	struct nvme_path_id *ctrid;
2128 	int rc;
2129 
2130 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2131 	ut_init_trid(&path1.trid);
2132 	ut_init_trid2(&path2.trid);
2133 	ut_init_trid3(&path3.trid);
2134 
2135 	set_thread(0);
2136 
2137 	g_ut_attach_ctrlr_status = 0;
2138 	g_ut_attach_bdev_count = 0;
2139 
2140 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2141 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2142 
2143 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2144 			      attach_ctrlr_done, NULL, NULL, false);
2145 	CU_ASSERT(rc == 0);
2146 
2147 	spdk_delay_us(1000);
2148 	poll_threads();
2149 
2150 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2151 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2152 
2153 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2154 
2155 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2156 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2157 
2158 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2159 			      attach_ctrlr_done, NULL, NULL, false);
2160 	CU_ASSERT(rc == 0);
2161 
2162 	spdk_delay_us(1000);
2163 	poll_threads();
2164 
2165 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
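	/* path2 should have been registered as an alternative path. */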
2166 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2167 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2168 			break;
2169 		}
2170 	}
2171 	CU_ASSERT(ctrid != NULL);
2172 
2173 	/* trid3 is not in the registered list. */
2174 	rc = bdev_nvme_delete("nvme0", &path3);
2175 	CU_ASSERT(rc == -ENXIO);
2176 
2177 	/* trid2 is not used, and simply removed. */
2178 	rc = bdev_nvme_delete("nvme0", &path2);
2179 	CU_ASSERT(rc == 0);
2180 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2181 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2182 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2183 	}
2184 
2185 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2186 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2187 
2188 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
2189 			      attach_ctrlr_done, NULL, NULL, false);
2190 	CU_ASSERT(rc == 0);
2191 
2192 	spdk_delay_us(1000);
2193 	poll_threads();
2194 
2195 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2196 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2197 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2198 			break;
2199 		}
2200 	}
2201 	CU_ASSERT(ctrid != NULL);
2202 
2203 	/* path1 is currently used and path3 is an alternative path.
2204 	 * If we remove path1, the active path is changed to path3.
2205 	 */
2206 	rc = bdev_nvme_delete("nvme0", &path1);
2207 	CU_ASSERT(rc == 0);
2208 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2209 	CU_ASSERT(nvme_ctrlr->resetting == true);
2210 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2211 		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2212 	}
2213 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2214 
2215 	poll_threads();
2216 
2217 	CU_ASSERT(nvme_ctrlr->resetting == false);
2218 
2219 	/* path3 is the current and only path. If we remove path3, the corresponding
2220 	 * nvme_ctrlr is removed.
2221 	 */
2222 	rc = bdev_nvme_delete("nvme0", &path3);
2223 	CU_ASSERT(rc == 0);
2224 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2225 
2226 	poll_threads();
2227 	spdk_delay_us(1000);
2228 	poll_threads();
2229 
2230 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2231 
2232 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2233 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2234 
2235 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2236 			      attach_ctrlr_done, NULL, NULL, false);
2237 	CU_ASSERT(rc == 0);
2238 
2239 	spdk_delay_us(1000);
2240 	poll_threads();
2241 
2242 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2243 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2244 
2245 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2246 
2247 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2248 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2249 
2250 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2251 			      attach_ctrlr_done, NULL, NULL, false);
2252 	CU_ASSERT(rc == 0);
2253 
2254 	spdk_delay_us(1000);
2255 	poll_threads();
2256 
2257 	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2258 	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2259 		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2260 			break;
2261 		}
2262 	}
2263 	CU_ASSERT(ctrid != NULL);
2264 
2265 	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2266 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2267 	CU_ASSERT(rc == 0);
2268 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2269 
2270 	poll_threads();
2271 	spdk_delay_us(1000);
2272 	poll_threads();
2273 
2274 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2275 }
2276 
2277 static void
2278 test_abort(void)
2279 {
2280 	struct spdk_nvme_transport_id trid = {};
2281 	struct spdk_nvme_ctrlr *ctrlr;
2282 	struct nvme_ctrlr *nvme_ctrlr;
2283 	const int STRING_SIZE = 32;
2284 	const char *attached_names[STRING_SIZE];
2285 	struct nvme_bdev *bdev;
2286 	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
2287 	struct spdk_io_channel *ch1, *ch2;
2288 	struct nvme_bdev_channel *nbdev_ch1;
2289 	struct nvme_io_path *io_path1;
2290 	struct nvme_ctrlr_channel *ctrlr_ch1;
2291 	int rc;
2292 
2293 	/* Create a ctrlr on thread 1 and submit the I/O and admin requests to be aborted
2294 	 * on thread 0. Abort requests for I/O are submitted on thread 0, and abort requests
2295 	 * for admin commands are submitted on thread 1. Both should succeed.
2296 	 */
2297 
2298 	ut_init_trid(&trid);
2299 
2300 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2301 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2302 
2303 	g_ut_attach_ctrlr_status = 0;
2304 	g_ut_attach_bdev_count = 1;
2305 
2306 	set_thread(1);
2307 
2308 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2309 			      attach_ctrlr_done, NULL, NULL, false);
2310 	CU_ASSERT(rc == 0);
2311 
2312 	spdk_delay_us(1000);
2313 	poll_threads();
2314 
2315 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2316 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2317 
2318 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2319 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2320 
2321 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2322 	ut_bdev_io_set_buf(write_io);
2323 
2324 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2325 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2326 
2327 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2328 
2329 	set_thread(0);
2330 
2331 	ch1 = spdk_get_io_channel(bdev);
2332 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2333 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2334 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2335 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2336 	ctrlr_ch1 = io_path1->ctrlr_ch;
2337 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2338 
2339 	set_thread(1);
2340 
2341 	ch2 = spdk_get_io_channel(bdev);
2342 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2343 
2344 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2345 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2346 
2347 	/* Aborting the already completed request should fail. */
2348 	write_io->internal.in_submit_request = true;
2349 	bdev_nvme_submit_request(ch1, write_io);
2350 	poll_threads();
2351 
2352 	CU_ASSERT(write_io->internal.in_submit_request == false);
2353 
2354 	abort_io->u.abort.bio_to_abort = write_io;
2355 	abort_io->internal.in_submit_request = true;
2356 
2357 	bdev_nvme_submit_request(ch1, abort_io);
2358 
2359 	poll_threads();
2360 
2361 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2362 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2363 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2364 
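	/* Aborting an already completed admin request should fail as well. */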
2365 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2366 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2367 
2368 	admin_io->internal.in_submit_request = true;
2369 	bdev_nvme_submit_request(ch1, admin_io);
2370 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2371 	poll_threads();
2372 
2373 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2374 
2375 	abort_io->u.abort.bio_to_abort = admin_io;
2376 	abort_io->internal.in_submit_request = true;
2377 
2378 	bdev_nvme_submit_request(ch2, abort_io);
2379 
2380 	poll_threads();
2381 
2382 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2383 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2384 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2385 
2386 	/* Aborting the write request should succeed. */
2387 	write_io->internal.in_submit_request = true;
2388 	bdev_nvme_submit_request(ch1, write_io);
2389 
2390 	CU_ASSERT(write_io->internal.in_submit_request == true);
2391 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
2392 
2393 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2394 	abort_io->u.abort.bio_to_abort = write_io;
2395 	abort_io->internal.in_submit_request = true;
2396 
2397 	bdev_nvme_submit_request(ch1, abort_io);
2398 
2399 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2400 	poll_threads();
2401 
2402 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2403 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2404 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2405 	CU_ASSERT(write_io->internal.in_submit_request == false);
2406 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2407 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
2408 
2409 	/* Aborting the admin request should succeed. */
2410 	admin_io->internal.in_submit_request = true;
2411 	bdev_nvme_submit_request(ch1, admin_io);
2412 
2413 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2414 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2415 
2416 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2417 	abort_io->u.abort.bio_to_abort = admin_io;
2418 	abort_io->internal.in_submit_request = true;
2419 
2420 	bdev_nvme_submit_request(ch2, abort_io);
2421 
2422 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2423 	poll_threads();
2424 
2425 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2426 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2427 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2428 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2429 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2430 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2431 
2432 	set_thread(0);
2433 
2434 	spdk_put_io_channel(ch1);
2435 
2436 	set_thread(1);
2437 
2438 	spdk_put_io_channel(ch2);
2439 
2440 	poll_threads();
2441 
2442 	free(write_io);
2443 	free(admin_io);
2444 	free(abort_io);
2445 
2446 	set_thread(1);
2447 
2448 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2449 	CU_ASSERT(rc == 0);
2450 
2451 	poll_threads();
2452 	spdk_delay_us(1000);
2453 	poll_threads();
2454 
2455 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2456 }
2457 
2458 static void
2459 test_get_io_qpair(void)
2460 {
2461 	struct spdk_nvme_transport_id trid = {};
2462 	struct spdk_nvme_ctrlr ctrlr = {};
2463 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2464 	struct spdk_io_channel *ch;
2465 	struct nvme_ctrlr_channel *ctrlr_ch;
2466 	struct spdk_nvme_qpair *qpair;
2467 	int rc;
2468 
2469 	ut_init_trid(&trid);
2470 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2471 
2472 	set_thread(0);
2473 
2474 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
2475 	CU_ASSERT(rc == 0);
2476 
2477 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2478 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2479 
2480 	ch = spdk_get_io_channel(nvme_ctrlr);
2481 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2482 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2483 	CU_ASSERT(ctrlr_ch->qpair != NULL);
2484 
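	/* bdev_nvme_get_io_qpair() should return the qpair held by the ctrlr channel. */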
2485 	qpair = bdev_nvme_get_io_qpair(ch);
2486 	CU_ASSERT(qpair == ctrlr_ch->qpair);
2487 
2488 	spdk_put_io_channel(ch);
2489 
2490 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2491 	CU_ASSERT(rc == 0);
2492 
2493 	poll_threads();
2494 	spdk_delay_us(1000);
2495 	poll_threads();
2496 
2497 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2498 }
2499 
2500 /* Test a scenario in which the bdev subsystem starts shutting down while NVMe bdevs
2501  * still exist. In this scenario, spdk_bdev_unregister() is called first. Add a
2502  * test case to avoid regression of this scenario. spdk_bdev_unregister() eventually
2503  * calls bdev_nvme_destruct(), so call bdev_nvme_destruct() directly here.
2504  */
2505 static void
2506 test_bdev_unregister(void)
2507 {
2508 	struct spdk_nvme_transport_id trid = {};
2509 	struct spdk_nvme_ctrlr *ctrlr;
2510 	struct nvme_ctrlr *nvme_ctrlr;
2511 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2512 	const int STRING_SIZE = 32;
2513 	const char *attached_names[STRING_SIZE];
2514 	struct nvme_bdev *bdev1, *bdev2;
2515 	int rc;
2516 
2517 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2518 	ut_init_trid(&trid);
2519 
2520 	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2521 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2522 
2523 	g_ut_attach_ctrlr_status = 0;
2524 	g_ut_attach_bdev_count = 2;
2525 
2526 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2527 			      attach_ctrlr_done, NULL, NULL, false);
2528 	CU_ASSERT(rc == 0);
2529 
2530 	spdk_delay_us(1000);
2531 	poll_threads();
2532 
2533 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2534 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2535 
2536 	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2537 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2538 
2539 	bdev1 = nvme_ns1->bdev;
2540 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2541 
2542 	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2543 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2544 
2545 	bdev2 = nvme_ns2->bdev;
2546 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2547 
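	/* Call bdev_nvme_destruct() directly, as spdk_bdev_unregister() would
	 * during shutdown.
	 */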
2548 	bdev_nvme_destruct(&bdev1->disk);
2549 	bdev_nvme_destruct(&bdev2->disk);
2550 
2551 	poll_threads();
2552 
2553 	CU_ASSERT(nvme_ns1->bdev == NULL);
2554 	CU_ASSERT(nvme_ns2->bdev == NULL);
2555 
2556 	nvme_ctrlr->destruct = true;
2557 	_nvme_ctrlr_destruct(nvme_ctrlr);
2558 
2559 	poll_threads();
2560 	spdk_delay_us(1000);
2561 	poll_threads();
2562 
2563 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2564 }
2565 
2566 static void
2567 test_compare_ns(void)
2568 {
2569 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2570 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2571 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2572 
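	/* bdev_nvme_compare_ns() treats two namespaces as matching when their
	 * defined IDs (EUI64, NGUID, UUID) agree.
	 */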
2573 	/* No IDs are defined. */
2574 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2575 
2576 	/* Only EUI64 are defined and not matched. */
2577 	nsdata1.eui64 = 0xABCDEF0123456789;
2578 	nsdata2.eui64 = 0xBBCDEF0123456789;
2579 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2580 
2581 	/* Only EUI64 are defined and matched. */
2582 	nsdata2.eui64 = 0xABCDEF0123456789;
2583 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2584 
2585 	/* Only NGUID are defined and not matched. */
2586 	nsdata1.eui64 = 0x0;
2587 	nsdata2.eui64 = 0x0;
2588 	nsdata1.nguid[0] = 0x12;
2589 	nsdata2.nguid[0] = 0x10;
2590 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2591 
2592 	/* Only NGUID are defined and matched. */
2593 	nsdata2.nguid[0] = 0x12;
2594 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2595 
2596 	/* Only UUID are defined and not matched. */
2597 	nsdata1.nguid[0] = 0x0;
2598 	nsdata2.nguid[0] = 0x0;
2599 	ns1.uuid.u.raw[0] = 0xAA;
2600 	ns2.uuid.u.raw[0] = 0xAB;
2601 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2602 
2603 	/* Only UUID are defined and matched. */
2604 	ns1.uuid.u.raw[0] = 0xAB;
2605 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2606 
2607 	/* All EUI64, NGUID, and UUID are defined and matched. */
2608 	nsdata1.eui64 = 0x123456789ABCDEF;
2609 	nsdata2.eui64 = 0x123456789ABCDEF;
2610 	nsdata1.nguid[15] = 0x34;
2611 	nsdata2.nguid[15] = 0x34;
2612 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2613 }
2614 
2615 static void
2616 test_init_ana_log_page(void)
2617 {
2618 	struct spdk_nvme_transport_id trid = {};
2619 	struct spdk_nvme_ctrlr *ctrlr;
2620 	struct nvme_ctrlr *nvme_ctrlr;
2621 	const int STRING_SIZE = 32;
2622 	const char *attached_names[STRING_SIZE];
2623 	int rc;
2624 
2625 	set_thread(0);
2626 
2627 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2628 	ut_init_trid(&trid);
2629 
2630 	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
2631 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2632 
2633 	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2634 	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2635 	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2636 	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
2637 	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2638 
2639 	g_ut_attach_ctrlr_status = 0;
2640 	g_ut_attach_bdev_count = 5;
2641 
2642 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2643 			      attach_ctrlr_done, NULL, NULL, false);
2644 	CU_ASSERT(rc == 0);
2645 
2646 	spdk_delay_us(1000);
2647 	poll_threads();
2648 
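	/* An extra admin queue poll period lets the ANA log page read complete. */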
2649 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2650 	poll_threads();
2651 
2652 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2653 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2654 
2655 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2656 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2657 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2658 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2659 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
2660 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
2661 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2662 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2663 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
2664 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2665 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
2666 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
2667 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
2668 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
2669 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
2670 
2671 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2672 	CU_ASSERT(rc == 0);
2673 
2674 	poll_threads();
2675 	spdk_delay_us(1000);
2676 	poll_threads();
2677 
2678 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2679 }
2680 
2681 static void
2682 init_accel(void)
2683 {
2684 	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
2685 				sizeof(int), "accel_p");
2686 }
2687 
2688 static void
2689 fini_accel(void)
2690 {
2691 	spdk_io_device_unregister(g_accel_p, NULL);
2692 }
2693 
2694 static void
2695 test_get_memory_domains(void)
2696 {
2697 	struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
2698 	struct nvme_ns ns = { .ctrlr = &ctrlr };
2699 	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
2700 	struct spdk_memory_domain *domains[2] = {};
2701 	int rc = 0;
2702 
2703 	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns, tailq);
2704 
2705 	/* The NVMe controller does not have a memory domain. */
2706 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0);
2707 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2708 	CU_ASSERT(rc == 0);
2709 
2710 	/* The NVMe controller has one memory domain. */
2711 	MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 1);
2712 	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
2713 	CU_ASSERT(rc == 1);
2714 	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain);
2715 }
2716 
2717 static void
2718 test_reconnect_qpair(void)
2719 {
2720 	struct spdk_nvme_transport_id trid = {};
2721 	struct spdk_nvme_ctrlr *ctrlr;
2722 	struct nvme_ctrlr *nvme_ctrlr;
2723 	const int STRING_SIZE = 32;
2724 	const char *attached_names[STRING_SIZE];
2725 	struct nvme_bdev *bdev;
2726 	struct spdk_io_channel *ch1, *ch2;
2727 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
2728 	struct nvme_io_path *io_path1, *io_path2;
2729 	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
2730 	int rc;
2731 
2732 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2733 	ut_init_trid(&trid);
2734 
2735 	set_thread(0);
2736 
2737 	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2738 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2739 
2740 	g_ut_attach_ctrlr_status = 0;
2741 	g_ut_attach_bdev_count = 1;
2742 
2743 	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
2744 			      attach_ctrlr_done, NULL, NULL, false);
2745 	CU_ASSERT(rc == 0);
2746 
2747 	spdk_delay_us(1000);
2748 	poll_threads();
2749 
2750 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2751 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2752 
2753 	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2754 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2755 
2756 	ch1 = spdk_get_io_channel(bdev);
2757 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2758 
2759 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2760 	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2761 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2762 	ctrlr_ch1 = io_path1->ctrlr_ch;
2763 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2764 
2765 	set_thread(1);
2766 
2767 	ch2 = spdk_get_io_channel(bdev);
2768 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2769 
2770 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
2771 	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
2772 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
2773 	ctrlr_ch2 = io_path2->ctrlr_ch;
2774 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
2775 
2776 	/* If a qpair is disconnected, it is freed and then reconnected by
2777 	 * resetting the corresponding nvme_ctrlr.
2778 	 */
2779 	ctrlr_ch2->qpair->is_connected = false;
2780 	ctrlr->is_failed = true;
2781 
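	/* Step through the reset sequence one poller iteration at a time. The
	 * disconnected qpair is freed first, the ctrlr is reset, and then both
	 * qpairs are recreated.
	 */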
2782 	poll_thread_times(1, 1);
2783 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2784 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2785 	CU_ASSERT(nvme_ctrlr->resetting == true);
2786 
2787 	poll_thread_times(0, 1);
2788 	poll_thread_times(1, 1);
2789 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2790 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2791 	CU_ASSERT(ctrlr->is_failed == true);
2792 
2793 	poll_thread_times(1, 1);
2794 	CU_ASSERT(ctrlr->is_failed == false);
2795 
2796 	poll_thread_times(0, 1);
2797 	poll_thread_times(1, 1);
2798 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2799 	CU_ASSERT(ctrlr_ch2->qpair != NULL);
2800 	CU_ASSERT(nvme_ctrlr->resetting == true);
2801 
2802 	poll_thread_times(1, 1);
2803 	poll_thread_times(0, 1);
2804 	poll_thread_times(1, 1);
2805 	poll_thread_times(1, 1);
2806 	CU_ASSERT(nvme_ctrlr->resetting == false);
2807 
2808 	poll_threads();
2809 
2810 	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
2811 	 * fails, the qpair is just freed.
2812 	 */
2813 	ctrlr_ch2->qpair->is_connected = false;
2814 	ctrlr->is_failed = true;
2815 	ctrlr->fail_reset = true;
2816 
2817 	poll_thread_times(1, 1);
2818 	CU_ASSERT(ctrlr_ch1->qpair != NULL);
2819 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2820 	CU_ASSERT(nvme_ctrlr->resetting == true);
2821 
2822 	poll_thread_times(0, 1);
2823 	poll_thread_times(1, 1);
2824 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2825 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2826 	CU_ASSERT(ctrlr->is_failed == true);
2827 
2828 	poll_thread_times(1, 1);
2829 	poll_thread_times(0, 1);
2830 	poll_thread_times(1, 1);
2831 	poll_thread_times(1, 1);
2832 	CU_ASSERT(ctrlr->is_failed == true);
2833 	CU_ASSERT(nvme_ctrlr->resetting == false);
2834 	CU_ASSERT(ctrlr_ch1->qpair == NULL);
2835 	CU_ASSERT(ctrlr_ch2->qpair == NULL);
2836 
2837 	poll_threads();
2838 
2839 	spdk_put_io_channel(ch2);
2840 
2841 	set_thread(0);
2842 
2843 	spdk_put_io_channel(ch1);
2844 
2845 	poll_threads();
2846 
2847 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2848 	CU_ASSERT(rc == 0);
2849 
2850 	poll_threads();
2851 	spdk_delay_us(1000);
2852 	poll_threads();
2853 
2854 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2855 }
2856 
2857 static void
2858 test_create_bdev_ctrlr(void)
2859 {
2860 	struct nvme_path_id path1 = {}, path2 = {};
2861 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
2862 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
2863 	const int STRING_SIZE = 32;
2864 	const char *attached_names[STRING_SIZE];
2865 	int rc;
2866 
2867 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2868 	ut_init_trid(&path1.trid);
2869 	ut_init_trid2(&path2.trid);
2870 
2871 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
2872 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2873 
2874 	g_ut_attach_ctrlr_status = 0;
2875 	g_ut_attach_bdev_count = 0;
2876 
2877 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2878 			      attach_ctrlr_done, NULL, NULL, true);
	CU_ASSERT(rc == 0);
2879 
2880 	spdk_delay_us(1000);
2881 	poll_threads();
2882 
2883 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2884 	poll_threads();
2885 
2886 	nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
2887 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
2888 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
2889 
2890 	/* cntlid is duplicated, and adding the second ctrlr should fail. */
2891 	g_ut_attach_ctrlr_status = -EINVAL;
2892 
2893 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
2894 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2895 
2896 	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
2897 
2898 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2899 			      attach_ctrlr_done, NULL, NULL, true);
2900 	CU_ASSERT(rc == 0);
2901 
2902 	spdk_delay_us(1000);
2903 	poll_threads();
2904 
2905 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2906 	poll_threads();
2907 
2908 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
2909 
2910 	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
2911 	g_ut_attach_ctrlr_status = 0;
2912 
2913 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
2914 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2915 
2916 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2917 			      attach_ctrlr_done, NULL, NULL, true);
2918 	CU_ASSERT(rc == 0);
2919 
2920 	spdk_delay_us(1000);
2921 	poll_threads();
2922 
2923 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2924 	poll_threads();
2925 
2926 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
2927 
2928 	/* Delete two ctrlrs at once. */
2929 	rc = bdev_nvme_delete("nvme0", &g_any_path);
2930 	CU_ASSERT(rc == 0);
2931 
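	/* Deletion is asynchronous; both paths remain until the threads are polled. */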
2932 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr);
2933 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
2934 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
2935 
2936 	poll_threads();
2937 	spdk_delay_us(1000);
2938 	poll_threads();
2939 
2940 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
2941 
2942 	/* Add two ctrlrs and delete them one by one. */
2943 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
2944 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2945 
2946 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
2947 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2948 
2949 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
2950 			      attach_ctrlr_done, NULL, NULL, true);
2951 	CU_ASSERT(rc == 0);
2952 
2953 	spdk_delay_us(1000);
2954 	poll_threads();
2955 
2956 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2957 	poll_threads();
2958 
2959 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
2960 			      attach_ctrlr_done, NULL, NULL, true);
2961 	CU_ASSERT(rc == 0);
2962 
2963 	spdk_delay_us(1000);
2964 	poll_threads();
2965 
2966 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2967 	poll_threads();
2968 
2969 	nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
2970 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
2971 
2972 	rc = bdev_nvme_delete("nvme0", &path1);
2973 	CU_ASSERT(rc == 0);
2974 
2975 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr);
2976 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
2977 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
2978 
2979 	poll_threads();
2980 	spdk_delay_us(1000);
2981 	poll_threads();
2982 
2983 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr);
2984 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
2985 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
2986 
2987 	rc = bdev_nvme_delete("nvme0", &path2);
2988 	CU_ASSERT(rc == 0);
2989 
2990 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr);
2991 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
2992 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
2993 
2994 	poll_threads();
2995 	spdk_delay_us(1000);
2996 	poll_threads();
2997 
2998 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
2999 }
3000 
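/* Find the nvme_ns of the given nvme_bdev which belongs to the given nvme_ctrlr. */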
3001 static struct nvme_ns *
3002 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3003 {
3004 	struct nvme_ns *nvme_ns;
3005 
3006 	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3007 		if (nvme_ns->ctrlr == nvme_ctrlr) {
3008 			return nvme_ns;
3009 		}
3010 	}
3011 
3012 	return NULL;
3013 }
3014 
3015 static void
3016 test_add_multi_ns_to_bdev(void)
3017 {
3018 	struct nvme_path_id path1 = {}, path2 = {};
3019 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3020 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3021 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3022 	struct nvme_ns *nvme_ns1, *nvme_ns2;
3023 	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3024 	const int STRING_SIZE = 32;
3025 	const char *attached_names[STRING_SIZE];
3026 	int rc;
3027 
3028 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3029 	ut_init_trid(&path1.trid);
3030 	ut_init_trid2(&path2.trid);
3031 
3032 	/* Create nvme_bdevs, some of which have namespaces shared between the two ctrlrs. */
3033 
3034 	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st, 3rd,
3035 	 * and 4th namespaces are populated.
3036 	 */
3037 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3038 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3039 
3040 	ctrlr1->ns[1].is_active = false;
3041 	ctrlr1->ns[4].is_active = false;
3042 	memset(&ctrlr1->ns[0].uuid, 0x1, sizeof(struct spdk_uuid));
3043 	memset(&ctrlr1->ns[2].uuid, 0x3, sizeof(struct spdk_uuid));
3044 	memset(&ctrlr1->ns[3].uuid, 0x4, sizeof(struct spdk_uuid));
3045 
3046 	g_ut_attach_ctrlr_status = 0;
3047 	g_ut_attach_bdev_count = 3;
3048 
3049 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 0,
3050 			      attach_ctrlr_done, NULL, NULL, true);
3051 	CU_ASSERT(rc == 0);
3052 
3053 	spdk_delay_us(1000);
3054 	poll_threads();
3055 
3056 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3057 	poll_threads();
3058 
3059 	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st, 2nd,
3060 	 * and 4th namespaces are populated. The uuid of the 4th namespace is different,
3061 	 * and hence adding the 4th namespace to the existing bdev should fail.
3062 	 */
3063 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3064 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3065 
3066 	ctrlr2->ns[2].is_active = false;
3067 	ctrlr2->ns[4].is_active = false;
3068 	memset(&ctrlr2->ns[0].uuid, 0x1, sizeof(struct spdk_uuid));
3069 	memset(&ctrlr2->ns[1].uuid, 0x2, sizeof(struct spdk_uuid));
3070 	memset(&ctrlr2->ns[3].uuid, 0x44, sizeof(struct spdk_uuid));
3071 
3072 	g_ut_attach_ctrlr_status = 0;
3073 	g_ut_attach_bdev_count = 2;
3074 
3075 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 0,
3076 			      attach_ctrlr_done, NULL, NULL, true);
3077 	CU_ASSERT(rc == 0);
3078 
3079 	spdk_delay_us(1000);
3080 	poll_threads();
3081 
3082 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3083 	poll_threads();
3084 
3085 	nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
3086 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3087 
3088 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3089 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3090 
3091 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3092 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3093 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3094 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3095 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3096 
3097 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3098 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3099 
3100 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3101 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3102 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3103 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3104 	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3105 
3106 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3107 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3108 	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3109 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3110 	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3111 	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3112 	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3113 	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3114 	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3115 
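	/* Only the 1st namespace is shared by both ctrlrs, so only bdev1 has two references. */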
3116 	CU_ASSERT(bdev1->ref == 2);
3117 	CU_ASSERT(bdev2->ref == 1);
3118 	CU_ASSERT(bdev3->ref == 1);
3119 	CU_ASSERT(bdev4->ref == 1);
3120 
3121 	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3122 	rc = bdev_nvme_delete("nvme0", &path1);
3123 	CU_ASSERT(rc == 0);
3124 
3125 	poll_threads();
3126 	spdk_delay_us(1000);
3127 	poll_threads();
3128 
3129 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr);
3130 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3131 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3132 
3133 	rc = bdev_nvme_delete("nvme0", &path2);
3134 	CU_ASSERT(rc == 0);
3135 
3136 	poll_threads();
3137 	spdk_delay_us(1000);
3138 	poll_threads();
3139 
3140 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
3141 
3142 	/* Test if an nvme_bdev which has a namespace shared between two ctrlrs
3143 	 * can be deleted when the bdev subsystem shuts down.
3144 	 */
3145 	g_ut_attach_bdev_count = 1;
3146 
3147 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3148 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3149 
3150 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3151 
3152 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 0,
3153 			      attach_ctrlr_done, NULL, NULL, true);
3154 	CU_ASSERT(rc == 0);
3155 
3156 	spdk_delay_us(1000);
3157 	poll_threads();
3158 
3159 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3160 	poll_threads();
3161 
3162 	ut_init_trid2(&path2.trid);
3163 
3164 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3165 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3166 
3167 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3168 
3169 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 0,
3170 			      attach_ctrlr_done, NULL, NULL, true);
3171 	CU_ASSERT(rc == 0);
3172 
3173 	spdk_delay_us(1000);
3174 	poll_threads();
3175 
3176 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3177 	poll_threads();
3178 
3179 	nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
3180 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3181 
3182 	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3183 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3184 
3185 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3186 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3187 
3188 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3189 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3190 
3191 	/* Check if the nvme_bdev has two nvme_ns. */
3192 	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3193 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3194 	CU_ASSERT(nvme_ns1->bdev == bdev1);
3195 
3196 	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3197 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3198 	CU_ASSERT(nvme_ns2->bdev == bdev1);
3199 
3200 	/* Delete the nvme_bdev first, as when the bdev subsystem shuts down. */
3201 	bdev_nvme_destruct(&bdev1->disk);
3202 
3203 	poll_threads();
3204 
3205 	CU_ASSERT(nvme_ns1->bdev == NULL);
3206 	CU_ASSERT(nvme_ns2->bdev == NULL);
3207 
3208 	nvme_ctrlr1->destruct = true;
3209 	_nvme_ctrlr_destruct(nvme_ctrlr1);
3210 
3211 	poll_threads();
3212 	spdk_delay_us(1000);
3213 	poll_threads();
3214 
3215 	nvme_ctrlr2->destruct = true;
3216 	_nvme_ctrlr_destruct(nvme_ctrlr2);
3217 
3218 	poll_threads();
3219 	spdk_delay_us(1000);
3220 	poll_threads();
3221 
3222 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
3223 }
3224 
3225 static void
3226 test_add_multi_io_paths_to_nbdev_ch(void)
3227 {
3228 	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3229 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3230 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3231 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3232 	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3233 	const int STRING_SIZE = 32;
3234 	const char *attached_names[STRING_SIZE];
3235 	struct nvme_bdev *bdev;
3236 	struct spdk_io_channel *ch;
3237 	struct nvme_bdev_channel *nbdev_ch;
3238 	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3239 	int rc;
3240 
3241 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3242 	ut_init_trid(&path1.trid);
3243 	ut_init_trid2(&path2.trid);
3244 	ut_init_trid3(&path3.trid);
3245 	g_ut_attach_ctrlr_status = 0;
3246 	g_ut_attach_bdev_count = 1;
3247 
3248 	set_thread(1);
3249 
3250 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3251 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3252 
3253 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3254 
3255 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3256 			      attach_ctrlr_done, NULL, NULL, true);
3257 	CU_ASSERT(rc == 0);
3258 
3259 	spdk_delay_us(1000);
3260 	poll_threads();
3261 
3262 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3263 	poll_threads();
3264 
3265 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3266 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3267 
3268 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3269 
3270 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3271 			      attach_ctrlr_done, NULL, NULL, true);
3272 	CU_ASSERT(rc == 0);
3273 
3274 	spdk_delay_us(1000);
3275 	poll_threads();
3276 
3277 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3278 	poll_threads();
3279 
3280 	nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
3281 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3282 
3283 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3284 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3285 
3286 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3287 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3288 
3289 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3290 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3291 
3292 	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3293 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3294 
3295 	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3296 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3297 
3298 	set_thread(0);
3299 
3300 	ch = spdk_get_io_channel(bdev);
3301 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3302 	nbdev_ch = spdk_io_channel_get_ctx(ch);
3303 
3304 	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3305 	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3306 
3307 	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3308 	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3309 
3310 	set_thread(1);
3311 
3312 	/* Check if I/O path is dynamically added to nvme_bdev_channel. */
3313 	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3314 	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3315 
3316 	memset(&ctrlr3->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3317 
3318 	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
3319 			      attach_ctrlr_done, NULL, NULL, true);
3320 	CU_ASSERT(rc == 0);
3321 
3322 	spdk_delay_us(1000);
3323 	poll_threads();
3324 
3325 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3326 	poll_threads();
3327 
3328 	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3329 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3330 
3331 	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3332 	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3333 
3334 	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3335 	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3336 
3337 	/* Check if I/O path is dynamically deleted from nvme_bdev_channel. */
3338 	rc = bdev_nvme_delete("nvme0", &path2);
3339 	CU_ASSERT(rc == 0);
3340 
3341 	poll_threads();
3342 	spdk_delay_us(1000);
3343 	poll_threads();
3344 
3345 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3346 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3347 	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3348 
3349 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3350 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3351 	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3352 
3353 	set_thread(0);
3354 
3355 	spdk_put_io_channel(ch);
3356 
3357 	poll_threads();
3358 
3359 	set_thread(1);
3360 
3361 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3362 	CU_ASSERT(rc == 0);
3363 
3364 	poll_threads();
3365 	spdk_delay_us(1000);
3366 	poll_threads();
3367 
3368 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
3369 }
3370 
3371 static void
3372 test_admin_path(void)
3373 {
3374 	struct nvme_path_id path1 = {}, path2 = {};
3375 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3376 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3377 	const int STRING_SIZE = 32;
3378 	const char *attached_names[STRING_SIZE];
3379 	struct nvme_bdev *bdev;
3380 	struct spdk_io_channel *ch;
3381 	struct spdk_bdev_io *bdev_io;
3382 	int rc;
3383 
3384 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3385 	ut_init_trid(&path1.trid);
3386 	ut_init_trid2(&path2.trid);
3387 	g_ut_attach_ctrlr_status = 0;
3388 	g_ut_attach_bdev_count = 1;
3389 
3390 	set_thread(0);
3391 
3392 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3393 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3394 
3395 	memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3396 
3397 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3398 			      attach_ctrlr_done, NULL, NULL, true);
3399 	CU_ASSERT(rc == 0);
3400 
3401 	spdk_delay_us(1000);
3402 	poll_threads();
3403 
3404 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3405 	poll_threads();
3406 
3407 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3408 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3409 
3410 	memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
3411 
3412 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3413 			      attach_ctrlr_done, NULL, NULL, true);
3414 	CU_ASSERT(rc == 0);
3415 
3416 	spdk_delay_us(1000);
3417 	poll_threads();
3418 
3419 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3420 	poll_threads();
3421 
3422 	nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
3423 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3424 
3425 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3426 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3427 
3428 	ch = spdk_get_io_channel(bdev);
3429 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3430 
3431 	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3432 	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3433 
3434 	/* ctrlr1 is failed but ctrlr2 is not. The admin command is
3435 	 * submitted to ctrlr2.
3436 	 */
3437 	ctrlr1->is_failed = true;
3438 	bdev_io->internal.in_submit_request = true;
3439 
3440 	bdev_nvme_submit_request(ch, bdev_io);
3441 
3442 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3443 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3444 	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3445 
3446 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3447 	poll_threads();
3448 
3449 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3450 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3451 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3452 
3453 	/* Both ctrlr1 and ctrlr2 are failed, so submitting the admin command fails. */
3454 	ctrlr2->is_failed = true;
3455 	bdev_io->internal.in_submit_request = true;
3456 
3457 	bdev_nvme_submit_request(ch, bdev_io);
3458 
3459 	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3460 	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3461 	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3462 	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3463 
3464 	free(bdev_io);
3465 
3466 	spdk_put_io_channel(ch);
3467 
3468 	poll_threads();
3469 
3470 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3471 	CU_ASSERT(rc == 0);
3472 
3473 	poll_threads();
3474 	spdk_delay_us(1000);
3475 	poll_threads();
3476 
3477 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
3478 }
3479 
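/* Find the io_path in the channel whose ctrlr channel was created from the given nvme_ctrlr. */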
3480 static struct nvme_io_path *
3481 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3482 			struct nvme_ctrlr *nvme_ctrlr)
3483 {
3484 	struct nvme_io_path *io_path;
3485 	struct nvme_ctrlr *_nvme_ctrlr;
3486 
3487 	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3488 		_nvme_ctrlr = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(io_path->ctrlr_ch));
3489 		if (_nvme_ctrlr == nvme_ctrlr) {
3490 			return io_path;
3491 		}
3492 	}
3493 
3494 	return NULL;
3495 }
3496 
3497 static void
3498 test_reset_bdev_ctrlr(void)
3499 {
3500 	struct nvme_path_id path1 = {}, path2 = {};
3501 	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3502 	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3503 	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3504 	struct nvme_path_id *curr_path1, *curr_path2;
3505 	const int STRING_SIZE = 32;
3506 	const char *attached_names[STRING_SIZE];
3507 	struct nvme_bdev *bdev;
3508 	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
3509 	struct nvme_bdev_io *first_bio;
3510 	struct spdk_io_channel *ch1, *ch2;
3511 	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3512 	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
3513 	int rc;
3514 
3515 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3516 	ut_init_trid(&path1.trid);
3517 	ut_init_trid2(&path2.trid);
3518 	g_ut_attach_ctrlr_status = 0;
3519 	g_ut_attach_bdev_count = 1;
3520 
3521 	set_thread(0);
3522 
3523 	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3524 	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3525 
3526 	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
3527 			      attach_ctrlr_done, NULL, NULL, true);
3528 	CU_ASSERT(rc == 0);
3529 
3530 	spdk_delay_us(1000);
3531 	poll_threads();
3532 
3533 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3534 	poll_threads();
3535 
3536 	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3537 	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3538 
3539 	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
3540 			      attach_ctrlr_done, NULL, NULL, true);
3541 	CU_ASSERT(rc == 0);
3542 
3543 	spdk_delay_us(1000);
3544 	poll_threads();
3545 
3546 	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3547 	poll_threads();
3548 
3549 	nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
3550 	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3551 
3552 	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3553 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3554 
3555 	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
3556 	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
3557 
3558 	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3559 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3560 
3561 	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
3562 	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
3563 
3564 	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3565 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3566 
3567 	set_thread(0);
3568 
3569 	ch1 = spdk_get_io_channel(bdev);
3570 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3571 
3572 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3573 	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
3574 	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
3575 	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
3576 	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
3577 
3578 	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
3579 	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
3580 
3581 	set_thread(1);
3582 
3583 	ch2 = spdk_get_io_channel(bdev);
3584 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3585 
3586 	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3587 	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
3588 	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
3589 	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
3590 	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
3591 
3592 	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
3593 
3594 	/* The first reset request from bdev_io is submitted on thread 0.
3595 	 * Verify that ctrlr1 is reset first and then ctrlr2.
3596 	 *
3597 	 * A few extra polls are necessary after resetting ctrlr1 to check
3598 	 * pending reset requests for ctrlr1.
3599 	 */
3600 	ctrlr1->is_failed = true;
3601 	curr_path1->is_failed = true;
3602 	ctrlr2->is_failed = true;
3603 	curr_path2->is_failed = true;
3604 
3605 	set_thread(0);
3606 
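	/* Submit the first reset. It should start with ctrlr1 via io_path11. */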
3607 	bdev_nvme_submit_request(ch1, first_bdev_io);
3608 	CU_ASSERT(first_bio->io_path == io_path11);
3609 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3610 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
3611 
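	/* One poll on thread 0 deletes the qpair of io_path11. */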
3612 	poll_thread_times(0, 1);
3613 	CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
3614 	CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);
3615 
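	/* One poll on thread 1 deletes the qpair of io_path21. ctrlr1 itself is
	 * still marked failed at this point.
	 */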
3616 	poll_thread_times(1, 1);
3617 	CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
3618 	CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
3619 	CU_ASSERT(ctrlr1->is_failed == true);
3620 
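	/* The next poll on thread 0 resets ctrlr1 itself. curr_path1 stays marked
	 * failed until the whole reset sequence completes.
	 */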
3621 	poll_thread_times(0, 1);
3622 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3623 	CU_ASSERT(ctrlr1->is_failed == false);
3624 	CU_ASSERT(curr_path1->is_failed == true);
3625 
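	/* The qpairs are re-created in the same order: thread 0 first, then thread 1. */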
3626 	poll_thread_times(0, 1);
3627 	CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
3628 	CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
3629 
3630 	poll_thread_times(1, 1);
3631 	CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
3632 	CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);
3633 
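	/* The remaining polls finish the reset of ctrlr1, clear the path's failed
	 * flag, and move the reset bio on to io_path12 so that ctrlr2 is reset next.
	 */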
3634 	poll_thread_times(0, 2);
3635 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3636 	poll_thread_times(1, 1);
3637 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3638 	poll_thread_times(0, 1);
3639 	CU_ASSERT(nvme_ctrlr1->resetting == false);
3640 	CU_ASSERT(curr_path1->is_failed == false);
3641 	CU_ASSERT(first_bio->io_path == io_path12);
3642 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3643 
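	/* The same delete-qpairs/reset/re-create-qpairs sequence now runs for ctrlr2. */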
3644 	poll_thread_times(0, 1);
3645 	CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
3646 	CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);
3647 
3648 	poll_thread_times(1, 1);
3649 	CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
3650 	CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
3651 	CU_ASSERT(ctrlr2->is_failed == true);
3652 
3653 	poll_thread_times(0, 2);
3654 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3655 	CU_ASSERT(ctrlr2->is_failed == false);
3656 	CU_ASSERT(curr_path2->is_failed == true);
3657 
3658 	poll_thread_times(0, 1);
3659 	CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
3660 	CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
3661 
3662 	poll_thread_times(1, 2);
3663 	CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
3664 	CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);
3665 
3666 	poll_thread_times(0, 2);
3667 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3668 	poll_thread_times(1, 1);
3669 	CU_ASSERT(nvme_ctrlr2->resetting == true);
3670 	poll_thread_times(0, 1);
3671 	CU_ASSERT(first_bio->io_path == NULL);
3672 	CU_ASSERT(nvme_ctrlr2->resetting == false);
3673 	CU_ASSERT(curr_path2->is_failed == false);
3674 
3675 	poll_threads();
3676 
3677 	/* There is a race between two reset requests from bdev_io.
3678 	 *
3679 	 * The first reset request is submitted on thread 0, and the second reset
3680 	 * request is submitted on thread 1 while the first is resetting ctrlr1.
3681 	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
3682 	 * both reset requests go to ctrlr2. The first arrives earlier than the second.
3683 	 * The second is pending on ctrlr2 again. After the first completes resetting
3684 	 * ctrlr2, both complete successfully.
3685 	 */
3686 	ctrlr1->is_failed = true;
3687 	curr_path1->is_failed = true;
3688 	ctrlr2->is_failed = true;
3689 	curr_path2->is_failed = true;
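	/* Pre-set both statuses to FAILED so the final SUCCESS assertions are only
	 * satisfied if both resets actually complete.
	 */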
3690 	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3691 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3692 
3693 	set_thread(0);
3694 
3695 	bdev_nvme_submit_request(ch1, first_bdev_io);
3696 
3697 	set_thread(1);
3698 
3699 	bdev_nvme_submit_request(ch2, second_bdev_io);
3700 
3701 	CU_ASSERT(nvme_ctrlr1->resetting == true);
3702 	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
3703 	CU_ASSERT(TAILQ_FIRST(&io_path21->ctrlr_ch->pending_resets) == second_bdev_io);
3704 
3705 	poll_threads();
3706 
3707 	CU_ASSERT(ctrlr1->is_failed == false);
3708 	CU_ASSERT(curr_path1->is_failed == false);
3709 	CU_ASSERT(ctrlr2->is_failed == false);
3710 	CU_ASSERT(curr_path2->is_failed == false);
3711 	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3712 	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3713 
3714 	set_thread(0);
3715 
3716 	spdk_put_io_channel(ch1);
3717 
3718 	set_thread(1);
3719 
3720 	spdk_put_io_channel(ch2);
3721 
3722 	poll_threads();
3723 
3724 	set_thread(0);
3725 
3726 	rc = bdev_nvme_delete("nvme0", &g_any_path);
3727 	CU_ASSERT(rc == 0);
3728 
3729 	poll_threads();
3730 	spdk_delay_us(1000);
3731 	poll_threads();
3732 
3733 	CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
3734 
3735 	free(first_bdev_io);
3736 	free(second_bdev_io);
3737 }
3738 
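/* Exercise the path selection rules of bdev_nvme_find_io_path(): paths whose
 * ANA state is inaccessible, persistent-loss, or change are skipped, paths
 * whose qpair is NULL (i.e. resetting) are skipped, and an ANA optimized path
 * is preferred over a non-optimized one.
 */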
3739 static void
3740 test_find_io_path(void)
3741 {
3742 	struct nvme_bdev_channel nbdev_ch = {
3743 		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
3744 	};
3745 	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
3746 	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
3747 	struct nvme_io_path io_path1 = { .ctrlr_ch = &ctrlr_ch1, .nvme_ns = &nvme_ns1, };
3748 	struct nvme_io_path io_path2 = { .ctrlr_ch = &ctrlr_ch2, .nvme_ns = &nvme_ns2, };
3749 
3750 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
3751 
3752 	/* Test if io_path whose ANA state is not accessible is excluded. */
3753 
3754 	ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
3755 	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3756 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3757 
3758 	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
3759 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3760 
3761 	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
3762 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3763 
3764 	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3765 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3766 
3767 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3768 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3769 
3770 	/* Test if io_path whose qpair is resetting is excluded. */
3771 
3772 	ctrlr_ch1.qpair = NULL;
3773 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
3774 
3775 	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
3776 
3777 	/* Test that an ANA optimized state is preferred, and that otherwise the
3778 	 * first ANA non-optimized state found is used.
3779 	 */
3780 
3781 	ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
3782 	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3783 	ctrlr_ch2.qpair = (struct spdk_nvme_qpair *)0x1;
3784 	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3785 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
3786 
3787 	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3788 	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
3789 }
3790 
3791 int
3792 main(int argc, const char **argv)
3793 {
3794 	CU_pSuite	suite = NULL;
3795 	unsigned int	num_failures;
3796 
3797 	CU_set_error_action(CUEA_ABORT);
3798 	CU_initialize_registry();
3799 
3800 	suite = CU_add_suite("nvme", NULL, NULL);
3801 
3802 	CU_ADD_TEST(suite, test_create_ctrlr);
3803 	CU_ADD_TEST(suite, test_reset_ctrlr);
3804 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
3805 	CU_ADD_TEST(suite, test_failover_ctrlr);
3806 	CU_ADD_TEST(suite, test_pending_reset);
3807 	CU_ADD_TEST(suite, test_attach_ctrlr);
3808 	CU_ADD_TEST(suite, test_aer_cb);
3809 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
3810 	CU_ADD_TEST(suite, test_add_remove_trid);
3811 	CU_ADD_TEST(suite, test_abort);
3812 	CU_ADD_TEST(suite, test_get_io_qpair);
3813 	CU_ADD_TEST(suite, test_bdev_unregister);
3814 	CU_ADD_TEST(suite, test_compare_ns);
3815 	CU_ADD_TEST(suite, test_init_ana_log_page);
3816 	CU_ADD_TEST(suite, test_get_memory_domains);
3817 	CU_ADD_TEST(suite, test_reconnect_qpair);
3818 	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
3819 	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
3820 	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
3821 	CU_ADD_TEST(suite, test_admin_path);
3822 	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
3823 	CU_ADD_TEST(suite, test_find_io_path);
3824 
3825 	CU_basic_set_mode(CU_BRM_VERBOSE);
3826 
3827 	allocate_threads(3);
3828 	set_thread(0);
3829 	bdev_nvme_library_init();
3830 	init_accel();
3831 
3832 	CU_basic_run_tests();
3833 
3834 	set_thread(0);
3835 	bdev_nvme_library_fini();
3836 	fini_accel();
3837 	free_threads();
3838 
3839 	num_failures = CU_get_number_of_failures();
3840 	CU_cleanup_registry();
3841 
3842 	return num_failures;
3843 }
3844