xref: /spdk/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c (revision da2fd6651a9cd4732b0910d30291821e77f4d643)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 #include "spdk_cunit.h"
36 #include "spdk/thread.h"
37 #include "spdk/bdev_module.h"
38 #include "spdk/bdev_module.h"
39 
40 #include "common/lib/ut_multithread.c"
41 
42 #include "bdev/nvme/bdev_nvme.c"
43 #include "bdev/nvme/common.c"
44 
45 #include "unit/lib/json_mock.c"
46 
/* Fake io_device handle registered for the accel engine; only its address
 * matters to the tests. */
static void *g_accel_p = (void *)0xdeadbeaf;

/* Stubs for NVMe driver entry points linked by bdev_nvme.c. DEFINE_STUB()
 * generates a function returning the trailing constant; DEFINE_STUB_V()
 * generates an empty void function. */
DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_get_default_ctrlr_opts, (struct spdk_nvme_ctrlr_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

/* Channel callbacks for the fake accel io_device registered by the tests. */
DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));
75 
76 struct spdk_io_channel *
77 spdk_accel_engine_get_io_channel(void)
78 {
79 	return spdk_get_io_channel(g_accel_p);
80 }
81 
/*
 * Zero the caller-provided qpair options struct so the tests never read
 * uninitialised option fields.
 */
void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	unsigned char *bytes = (unsigned char *)opts;
	size_t i;

	for (i = 0; i < opts_size; i++) {
		bytes[i] = 0;
	}
}
89 
/* Controller-level query/command stubs. */
DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

/* Namespace property stubs; all return zero/false defaults. */
DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
	    (const struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

/* Zoned namespace (ZNS) stubs; the tests do not exercise zoned I/O. */
DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

/* bdev module framework stubs. */
DEFINE_STUB_V(spdk_bdev_module_finish_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

/* Opal (self-encrypting drive) stubs. */
DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

/* Open-channel SSD (OCSSD) stubs; OCSSD paths are not under test here. */
DEFINE_STUB_V(bdev_ocssd_populate_namespace, (struct nvme_ctrlr *nvme_ctrlr,
		struct nvme_ns *nvme_ns, struct nvme_async_probe_ctx *ctx));

DEFINE_STUB_V(bdev_ocssd_depopulate_namespace, (struct nvme_ns *nvme_ns));

DEFINE_STUB_V(bdev_ocssd_namespace_config_json, (struct spdk_json_write_ctx *w,
		struct nvme_ns *nvme_ns));

DEFINE_STUB(bdev_ocssd_create_io_channel, int, (struct nvme_ctrlr_channel *ioch), 0);

DEFINE_STUB_V(bdev_ocssd_destroy_io_channel, (struct nvme_ctrlr_channel *ioch));

DEFINE_STUB(bdev_ocssd_init_ctrlr, int, (struct nvme_ctrlr *nvme_ctrlr), 0);

DEFINE_STUB_V(bdev_ocssd_fini_ctrlr, (struct nvme_ctrlr *nvme_ctrlr));

DEFINE_STUB_V(bdev_ocssd_handle_chunk_notification, (struct nvme_ctrlr *nvme_ctrlr));

/* Accel framework stub used by the poll group's accel function table. */
DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
221 
222 
/* One fake in-flight NVMe command queued on a spdk_nvme_qpair. */
struct ut_nvme_req {
	uint16_t			opc;	/* NVMe opcode */
	spdk_nvme_cmd_cb		cb_fn;	/* completion callback */
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;	/* completion to deliver */
	TAILQ_ENTRY(ut_nvme_req)	tailq;	/* outstanding_reqs linkage */
};
230 
/* Minimal fake namespace replacing the driver's opaque struct. */
struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;	/* 1-based NSID */
	bool				is_active;
	struct spdk_uuid		uuid;
	enum spdk_nvme_ana_state	ana_state;
};
238 
/* Minimal fake queue pair replacing the driver's opaque struct. */
struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	bool				is_connected;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;	/* poll group's qpairs list */
	struct spdk_nvme_poll_group	*poll_group;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;			/* ctrlr's active_io_qpairs list */
};
248 
/* Minimal fake controller replacing the driver's opaque struct. */
struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;		/* array of num_ns namespaces */
	struct spdk_nvme_ns_data	*nsdata;	/* parallel identify-namespace data */
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;	/* test knob: make spdk_nvme_ctrlr_reset() fail */
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;		/* init/attached global list linkage */
	struct spdk_nvme_ctrlr_opts	opts;
};
263 
/* Minimal fake poll group replacing the driver's opaque struct. */
struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	qpairs;
};
269 
/* Fake probe context created by spdk_nvme_connect_async() and consumed by
 * spdk_nvme_probe_poll_async(). */
struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;	/* carries the ctrlr opts pointer */
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};
276 
/* Controllers created by ut_attach_ctrlr(), waiting for probe polling. */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
/* Controllers moved here once spdk_nvme_probe_poll_async() attaches them. */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
/* NOTE(review): presumably checked by attach-completion callbacks defined
 * later in this file — verify against those tests. */
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
/* Return code the spdk_bdev_register() stub hands back. */
static int g_ut_register_bdev_status;
283 
284 static void
285 ut_init_trid(struct spdk_nvme_transport_id *trid)
286 {
287 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
288 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
289 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
290 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
291 }
292 
293 static void
294 ut_init_trid2(struct spdk_nvme_transport_id *trid)
295 {
296 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
297 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
298 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
299 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
300 }
301 
302 static void
303 ut_init_trid3(struct spdk_nvme_transport_id *trid)
304 {
305 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
306 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
307 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
308 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
309 }
310 
/*
 * Three-way comparison of two ints.
 *
 * Uses (a > b) - (a < b) instead of a - b: the subtraction form invokes
 * signed-integer overflow (undefined behavior) for operands far apart in
 * opposite directions, e.g. cmp_int(INT_MAX, -2).
 *
 * Returns < 0 if a < b, 0 if equal, > 0 if a > b. Callers in this file
 * only test the sign, so the change of magnitude is compatible.
 */
static int
cmp_int(int a, int b)
{
	return (a > b) - (a < b);
}
316 
317 int
318 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
319 			       const struct spdk_nvme_transport_id *trid2)
320 {
321 	int cmp;
322 
323 	/* We assume trtype is TCP for now. */
324 	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);
325 
326 	cmp = cmp_int(trid1->trtype, trid2->trtype);
327 	if (cmp) {
328 		return cmp;
329 	}
330 
331 	cmp = strcasecmp(trid1->traddr, trid2->traddr);
332 	if (cmp) {
333 		return cmp;
334 	}
335 
336 	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
337 	if (cmp) {
338 		return cmp;
339 	}
340 
341 	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
342 	if (cmp) {
343 		return cmp;
344 	}
345 
346 	cmp = strcmp(trid1->subnqn, trid2->subnqn);
347 	if (cmp) {
348 		return cmp;
349 	}
350 
351 	return 0;
352 }
353 
354 static struct spdk_nvme_ctrlr *
355 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
356 		bool ana_reporting)
357 {
358 	struct spdk_nvme_ctrlr *ctrlr;
359 	uint32_t i;
360 
361 	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
362 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
363 			/* There is a ctrlr whose trid matches. */
364 			return NULL;
365 		}
366 	}
367 
368 	ctrlr = calloc(1, sizeof(*ctrlr));
369 	if (ctrlr == NULL) {
370 		return NULL;
371 	}
372 
373 	ctrlr->attached = true;
374 	ctrlr->adminq.ctrlr = ctrlr;
375 	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
376 
377 	if (num_ns != 0) {
378 		ctrlr->num_ns = num_ns;
379 		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
380 		if (ctrlr->ns == NULL) {
381 			free(ctrlr);
382 			return NULL;
383 		}
384 
385 		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
386 		if (ctrlr->nsdata == NULL) {
387 			free(ctrlr->ns);
388 			free(ctrlr);
389 			return NULL;
390 		}
391 
392 		for (i = 0; i < num_ns; i++) {
393 			ctrlr->ns[i].id = i + 1;
394 			ctrlr->ns[i].ctrlr = ctrlr;
395 			ctrlr->ns[i].is_active = true;
396 			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
397 			ctrlr->nsdata[i].nsze = 1024;
398 		}
399 
400 		ctrlr->cdata.nn = num_ns;
401 		ctrlr->cdata.nanagrpid = num_ns;
402 	}
403 
404 	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
405 	ctrlr->trid = *trid;
406 	TAILQ_INIT(&ctrlr->active_io_qpairs);
407 
408 	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);
409 
410 	return ctrlr;
411 }
412 
413 static void
414 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
415 {
416 	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));
417 
418 	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
419 	free(ctrlr->nsdata);
420 	free(ctrlr->ns);
421 	free(ctrlr);
422 }
423 
424 static int
425 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
426 		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
427 {
428 	struct ut_nvme_req *req;
429 
430 	req = calloc(1, sizeof(*req));
431 	if (req == NULL) {
432 		return -ENOMEM;
433 	}
434 
435 	req->opc = opc;
436 	req->cb_fn = cb_fn;
437 	req->cb_arg = cb_arg;
438 
439 	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
440 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
441 
442 	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
443 	qpair->num_outstanding_reqs++;
444 
445 	return 0;
446 }
447 
448 static struct spdk_bdev_io *
449 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
450 		 struct spdk_io_channel *ch)
451 {
452 	struct spdk_bdev_io *bdev_io;
453 
454 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
455 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
456 	bdev_io->type = type;
457 	bdev_io->bdev = &nbdev->disk;
458 	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
459 
460 	return bdev_io;
461 }
462 
463 static void
464 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
465 {
466 	bdev_io->u.bdev.iovs = &bdev_io->iov;
467 	bdev_io->u.bdev.iovcnt = 1;
468 
469 	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
470 	bdev_io->iov.iov_len = 4096;
471 }
472 
473 static void
474 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
475 {
476 	if (ctrlr->is_failed) {
477 		free(ctrlr);
478 		return;
479 	}
480 
481 	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);
482 
483 	if (probe_ctx->attach_cb) {
484 		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
485 	}
486 }
487 
488 int
489 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
490 {
491 	struct spdk_nvme_ctrlr *ctrlr, *tmp;
492 
493 	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
494 		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
495 			continue;
496 		}
497 		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
498 		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
499 	}
500 
501 	free(probe_ctx);
502 
503 	return 0;
504 }
505 
506 struct spdk_nvme_probe_ctx *
507 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
508 			const struct spdk_nvme_ctrlr_opts *opts,
509 			spdk_nvme_attach_cb attach_cb)
510 {
511 	struct spdk_nvme_probe_ctx *probe_ctx;
512 
513 	if (trid == NULL) {
514 		return NULL;
515 	}
516 
517 	probe_ctx = calloc(1, sizeof(*probe_ctx));
518 	if (probe_ctx == NULL) {
519 		return NULL;
520 	}
521 
522 	probe_ctx->trid = *trid;
523 	probe_ctx->cb_ctx = (void *)opts;
524 	probe_ctx->attach_cb = attach_cb;
525 
526 	return probe_ctx;
527 }
528 
529 int
530 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
531 {
532 	if (ctrlr->attached) {
533 		ut_detach_ctrlr(ctrlr);
534 	}
535 
536 	return 0;
537 }
538 
/* Return the fake controller's identify-controller data. */
const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}
544 
/* Return the number of namespaces configured on the fake controller. */
uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}
550 
551 struct spdk_nvme_ns *
552 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
553 {
554 	if (nsid < 1 || nsid > ctrlr->num_ns) {
555 		return NULL;
556 	}
557 
558 	return &ctrlr->ns[nsid - 1];
559 }
560 
561 bool
562 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
563 {
564 	if (nsid < 1 || nsid > ctrlr->num_ns) {
565 		return false;
566 	}
567 
568 	return ctrlr->ns[nsid - 1].is_active;
569 }
570 
571 union spdk_nvme_csts_register
572 	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
573 {
574 	union spdk_nvme_csts_register csts;
575 
576 	csts.raw = 0;
577 
578 	return csts;
579 }
580 
581 union spdk_nvme_vs_register
582 	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
583 {
584 	union spdk_nvme_vs_register vs;
585 
586 	vs.raw = 0;
587 
588 	return vs;
589 }
590 
591 struct spdk_nvme_qpair *
592 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
593 			       const struct spdk_nvme_io_qpair_opts *user_opts,
594 			       size_t opts_size)
595 {
596 	struct spdk_nvme_qpair *qpair;
597 
598 	qpair = calloc(1, sizeof(*qpair));
599 	if (qpair == NULL) {
600 		return NULL;
601 	}
602 
603 	qpair->ctrlr = ctrlr;
604 	TAILQ_INIT(&qpair->outstanding_reqs);
605 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
606 
607 	return qpair;
608 }
609 
610 int
611 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
612 				 struct spdk_nvme_qpair *qpair)
613 {
614 	if (qpair->is_connected) {
615 		return -EISCONN;
616 	}
617 
618 	qpair->is_connected = true;
619 
620 	return 0;
621 }
622 
623 int
624 spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
625 {
626 	struct spdk_nvme_ctrlr *ctrlr;
627 
628 	ctrlr = qpair->ctrlr;
629 
630 	if (ctrlr->is_failed) {
631 		return -ENXIO;
632 	}
633 	qpair->is_connected = true;
634 
635 	return 0;
636 }
637 
/* Mark a qpair disconnected; any outstanding requests are left queued. */
void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	qpair->is_connected = false;
}
643 
644 int
645 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
646 {
647 	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);
648 
649 	qpair->is_connected = false;
650 
651 	if (qpair->poll_group != NULL) {
652 		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
653 	}
654 
655 	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);
656 
657 	CU_ASSERT(qpair->num_outstanding_reqs == 0);
658 
659 	free(qpair);
660 
661 	return 0;
662 }
663 
664 int
665 spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
666 {
667 	if (ctrlr->fail_reset) {
668 		return -EIO;
669 	}
670 
671 	ctrlr->is_failed = false;
672 
673 	return 0;
674 }
675 
/* Mark the fake controller failed; cleared again by a successful reset. */
void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}
681 
/* Size of one ANA group descriptor carrying exactly one 32-bit NSID entry. */
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
/*
 * Serialize an ANA log page for the fake controller into buf: a header
 * followed by one single-NSID group descriptor per namespace, with each
 * group ID equal to the namespace ID. Fatally asserts if length is too
 * small for the header or any descriptor.
 */
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	/* Staging area so the descriptor's flexible nsid[] array has room
	 * for its single entry. */
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	/* Advance past the header; length tracks the remaining capacity. */
	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}
721 
/*
 * Admin get-log-page: for the ANA log, synthesize the page contents into
 * payload synchronously, then queue a completion like any admin command.
 */
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		/* Partial (offset) reads of the ANA log are not modeled. */
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}
737 
/* Queue a fake admin command using the opcode from cmd; buf is ignored. */
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}
745 
746 int
747 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
748 			      void *cmd_cb_arg,
749 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
750 {
751 	struct ut_nvme_req *req = NULL, *abort_req;
752 
753 	if (qpair == NULL) {
754 		qpair = &ctrlr->adminq;
755 	}
756 
757 	abort_req = calloc(1, sizeof(*abort_req));
758 	if (abort_req == NULL) {
759 		return -ENOMEM;
760 	}
761 
762 	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
763 		if (req->cb_arg == cmd_cb_arg) {
764 			break;
765 		}
766 	}
767 
768 	if (req == NULL) {
769 		free(abort_req);
770 		return -ENOENT;
771 	}
772 
773 	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
774 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
775 
776 	abort_req->opc = SPDK_NVME_OPC_ABORT;
777 	abort_req->cb_fn = cb_fn;
778 	abort_req->cb_arg = cb_arg;
779 
780 	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
781 	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
782 	abort_req->cpl.cdw0 = 0;
783 
784 	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
785 	ctrlr->adminq.num_outstanding_reqs++;
786 
787 	return 0;
788 }
789 
/* Drain the admin queue; returns the number of completions processed. */
int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
795 
/* Return the namespace's 1-based NSID. */
uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}
801 
/* Return the controller that owns this namespace. */
struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}
807 
/*
 * Look up the identify-namespace data backing ns in its controller's
 * parallel nsdata array (indexed by NSID - 1).
 * NOTE(review): a file-scope identifier beginning with '_' is reserved by
 * the C standard (7.1.3); consider renaming — callers are local only.
 */
static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}
813 
/* Return the namespace's identify data from the controller's nsdata array. */
const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}
819 
/* Return the namespace size in sectors (nsze from the identify data). */
uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}
825 
/* Return the namespace's UUID (zero-filled unless a test sets it). */
const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return &ns->uuid;
}
831 
/* Queue a fake READ; data/metadata buffers are ignored by the mock. */
int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}
840 
/* Queue a fake WRITE; data/metadata buffers are ignored by the mock. */
int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}
849 
/* Queue a fake vectored READ; SGL callbacks are never invoked by the mock. */
int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}
860 
/* Queue a fake vectored WRITE; SGL callbacks are never invoked by the mock. */
int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}
871 
/* Queue a fake vectored COMPARE; SGL callbacks are never invoked. */
int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}
882 
/* Queue a fake DSM (unmap) command; the ranges are ignored by the mock. */
int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}
890 
/* Queue a fake WRITE ZEROES command; LBA range is ignored by the mock. */
int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}
899 
900 struct spdk_nvme_poll_group *
901 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
902 {
903 	struct spdk_nvme_poll_group *group;
904 
905 	group = calloc(1, sizeof(*group));
906 	if (group == NULL) {
907 		return NULL;
908 	}
909 
910 	group->ctx = ctx;
911 	if (table != NULL) {
912 		group->accel_fn_table = *table;
913 	}
914 	TAILQ_INIT(&group->qpairs);
915 
916 	return group;
917 }
918 
919 int
920 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
921 {
922 	if (!TAILQ_EMPTY(&group->qpairs)) {
923 		return -EBUSY;
924 	}
925 
926 	free(group);
927 
928 	return 0;
929 }
930 
931 int32_t
932 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
933 				    uint32_t max_completions)
934 {
935 	struct ut_nvme_req *req, *tmp;
936 	uint32_t num_completions = 0;
937 
938 	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
939 		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
940 		qpair->num_outstanding_reqs--;
941 
942 		req->cb_fn(req->cb_arg, &req->cpl);
943 
944 		free(req);
945 		num_completions++;
946 	}
947 
948 	return num_completions;
949 }
950 
951 int64_t
952 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
953 		uint32_t completions_per_qpair,
954 		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
955 {
956 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
957 	int64_t local_completions = 0, error_reason = 0, num_completions = 0;
958 
959 	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);
960 
961 	if (disconnected_qpair_cb == NULL) {
962 		return -EINVAL;
963 	}
964 
965 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
966 		if (qpair->is_connected) {
967 			local_completions = spdk_nvme_qpair_process_completions(qpair,
968 					    completions_per_qpair);
969 			if (local_completions < 0 && error_reason == 0) {
970 				error_reason = local_completions;
971 			} else {
972 				num_completions += local_completions;
973 				assert(num_completions >= 0);
974 			}
975 		}
976 	}
977 
978 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
979 		if (!qpair->is_connected) {
980 			disconnected_qpair_cb(qpair, group->ctx);
981 		}
982 	}
983 
984 	return error_reason ? error_reason : num_completions;
985 }
986 
/* Attach a (disconnected) qpair to a poll group. Always succeeds. */
int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	/* Real driver rule: qpairs must be added before being connected. */
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}
998 
/* Detach a (disconnected) qpair from its poll group. Always succeeds. */
int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	/* Real driver rule: qpairs must be disconnected before removal. */
	CU_ASSERT(!qpair->is_connected);

	TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}
1009 
/* Stub: report whatever status the current test configured. */
int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	return g_ut_register_bdev_status;
}
1015 
1016 void
1017 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
1018 {
1019 	int rc;
1020 
1021 	rc = bdev->fn_table->destruct(bdev->ctxt);
1022 	if (rc <= 0 && cb_fn != NULL) {
1023 		cb_fn(cb_arg, rc);
1024 	}
1025 }
1026 
/* Stub: apply a block count change directly; always succeeds. */
int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}
1034 
/* Stub: return the channel stored in the I/O's internal state. */
struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}
1040 
/* Stub: record the completion status and mark the I/O as no longer
 * in submission, so tests can inspect both fields directly.
 */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}
1047 
1048 void
1049 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
1050 {
1051 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
1052 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1053 	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
1054 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
1055 	} else {
1056 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
1057 	}
1058 
1059 	bdev_io->internal.error.nvme.cdw0 = cdw0;
1060 	bdev_io->internal.error.nvme.sct = sct;
1061 	bdev_io->internal.error.nvme.sc = sc;
1062 
1063 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
1064 }
1065 
1066 void
1067 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1068 {
1069 	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
1070 
1071 	ut_bdev_io_set_buf(bdev_io);
1072 
1073 	cb(ch, bdev_io, true);
1074 }
1075 
/* Verify that a nvme_ctrlr can be created and that its deletion is
 * asynchronous: the ctrlr stays registered until the threads are polled.
 */
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	/* Deletion is deferred; the ctrlr is still registered before polling. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1099 
/* Verify bdev_nvme_reset() error cases (destructing, already resetting) and a
 * full successful reset: I/O qpairs are destroyed thread by thread, the ctrlr
 * is reset, then qpairs are recreated thread by thread.
 */
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* ch1 lives on thread 0, ch2 on thread 1. */
	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EAGAIN);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Step 1: qpair on thread 0 is destroyed. */
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Step 2: qpair on thread 1 is destroyed; ctrlr is still failed. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	/* Step 3: the ctrlr itself is reset; its failed flag is cleared. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr.is_failed == false);

	/* Step 4: qpair on thread 0 is recreated. */
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);

	/* Step 5: qpair on thread 1 is recreated; the sequence is not done yet. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	/* Step 6: the reset completes and the trid's failed flag is cleared. */
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1207 
/* Verify that destructing a ctrlr while a reset is in progress is deferred
 * until the reset completes and all I/O channels are released.
 */
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();

	/* Reset completed but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so the destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	/* Releasing both channels lets the deferred destruct finish. */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1282 
/* Verify bdev_nvme_failover() with a single trid (cases 1-4) and with a
 * secondary trid registered (cases 5-7), including path switch to trid2.
 */
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 3: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(&curr_trid->trid == nvme_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 5: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EAGAIN);

	/* Case 6: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 7: failover completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == true);

	/* The connected path switched from trid1 to trid2. */
	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(&next_trid->trid == nvme_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1421 
/* Attach callback for bdev_nvme_create(): verify the reported status and
 * bdev count against the globally-expected values.
 */
static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}
1428 
/* Verify that a reset submitted while another reset is in progress is queued
 * on the channel's pending_resets list and completes (or fails) together with
 * the first reset.
 */
static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	/* ch1 lives on thread 0, ch2 on thread 1. */
	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	ctrlr_ch2 = nbdev_ch2->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	/* The second reset is queued rather than started. */
	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	poll_threads();

	/* Both resets complete successfully. */
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the controller is removed while
	 * processing the first request. Hence both reset requests should fail.
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	/* Make the in-flight reset fail. */
	ctrlr->fail_reset = true;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}
1554 
/* Verify bdev_nvme_create() attach outcomes: failed ctrlr, ctrlr with no
 * namespace, ctrlr with one namespace/bdev, and bdev registration failure.
 */
static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
	ctrlr = ut_attach_ctrlr(&trid, 0, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 1);

	/* The created bdev name is reported back through attached_names. */
	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Ctrlr has one namespace but one nvme_ctrlr with no namespace is
	 * created because creating one nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 1);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Restore the global so later tests are not affected. */
	g_ut_register_bdev_status = 0;
}
1682 
/* Verify that a disconnected I/O qpair is reconnected by polling, and that
 * reconnection does not happen while the ctrlr is failed.
 */
static void
test_reconnect_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;

	set_thread(0);

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctrlr_ch = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(ctrlr_ch->qpair != NULL);
	CU_ASSERT(ctrlr_ch->group != NULL);
	CU_ASSERT(ctrlr_ch->group->group != NULL);
	CU_ASSERT(ctrlr_ch->group->poller != NULL);

	/* Test if the disconnected qpair is reconnected. */
	ctrlr_ch->qpair->is_connected = false;

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->is_connected == true);

	/* If the ctrlr is failed, reconnecting qpair should fail too. */
	ctrlr_ch->qpair->is_connected = false;
	ctrlr.is_failed = true;

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->is_connected == false);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1739 
/* Verify aer_cb() handling of async events: namespace attribute changes
 * (populate/depopulate/resize) and ANA state changes.
 */
static void
test_aer_cb(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_bdev *bdev;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th
	 * namespaces are populated.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 4, true);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].is_active = false;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 3;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(10000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr->num_ns == 4);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == false);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);

	bdev = nvme_ctrlr->namespaces[3]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 1024);

	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
	 * change the size of the 4th namespace.
	 */
	ctrlr->ns[0].is_active = true;
	ctrlr->ns[2].is_active = false;
	ctrlr->nsdata[3].nsze = 2048;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == false);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);
	CU_ASSERT(bdev->disk.blockcnt == 2048);

	/* Change ANA state of active namespaces. */
	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->namespaces[0]->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->ana_state == SPDK_NVME_ANA_CHANGE_STATE);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1837 
/* Helper: submit an I/O of the given type and verify that it becomes an
 * outstanding request on the qpair and completes successfully after polling.
 */
static void
ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* The request is outstanding on the qpair until polled. */
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 1);

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1862 
/* Helper: submit an I/O of the given type and verify that it completes
 * synchronously without placing any request on the qpair (e.g. flush).
 */
static void
ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		   enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* Completed immediately - no polling, no outstanding qpair request. */
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1882 
/* Helper: submit a fused compare-and-write and verify that two requests
 * (compare first, then write) are outstanding and complete successfully.
 */
static void
ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct ut_nvme_req *req;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));

	/* Only compare and write now. */
	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* Both halves of the fused command are outstanding. */
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 2);
	CU_ASSERT(bio->first_fused_submitted == true);

	/* First outstanding request is compare operation. */
	req = TAILQ_FIRST(&qpair->outstanding_reqs);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1916 
/* Helper: submit an NVMe admin passthru command and verify that it is
 * processed on the adminq (thread 1) and then completed back on the
 * submitting thread (thread 0).
 */
static void
ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			 struct spdk_nvme_ctrlr *ctrlr)
{
	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
	bdev_io->internal.in_submit_request = true;
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* The admin request completes on thread 1 first... */
	spdk_delay_us(10000);
	poll_thread_times(1, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* ...then the bdev_io is completed on thread 0. */
	poll_thread_times(0, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
}
1940 
/* Verify bdev_nvme_submit_request() for each supported I/O type: regular
 * NVMe commands, nop (flush), fused compare-and-write, and admin passthru.
 */
static void
test_submit_nvme_cmd(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(1);

	ctrlr = ut_attach_ctrlr(&trid, 1, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);

	/* Read without iovs first; then with a buffer for the rest. */
	bdev_io->u.bdev.iovs = NULL;

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);

	ut_bdev_io_set_buf(bdev_io);

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);

	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);

	ut_test_submit_fused_nvme_cmd(ch, bdev_io);

	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2018 
/* Verify bdev_nvme_delete() with a specific trid: removing an unknown path
 * fails with -ENXIO, removing an unused alternative path simply drops it,
 * removing the active path fails over to an alternative one, and removing
 * the last path (or passing NULL) destroys the nvme_ctrlr itself.
 */
static void
test_remove_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *ctrid;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);

	set_thread(0);

	/* trid1 becomes the connected path; trid2 is registered as a secondary. */
	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	/* trid3 is not in the registered list. */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == -ENXIO);

	/* trid2 is not used, and simply removed. */
	rc = bdev_nvme_delete("nvme0", &trid2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	/* trid2 must no longer appear anywhere in the trid list. */
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0);
	}

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	/* trid1 is currently used and trid3 is an alternative path.
	 * If we remove trid1, path is changed to trid3.
	 */
	rc = bdev_nvme_delete("nvme0", &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	/* The delete of the active path kicks off a reset to switch paths. */
	CU_ASSERT(nvme_ctrlr->resetting == true);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid1) != 0);
	}
	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid3) == 0);

	/* Let the in-flight reset complete. */
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* trid3 is the current and only path. If we remove trid3, the corresponding
	 * nvme_ctrlr is removed.
	 */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == 0);
	/* Destruction is asynchronous: still registered until poll_threads(). */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	/* If trid is not specified, nvme_ctrlr itself is removed. */
	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2103 
2104 static void
2105 test_abort(void)
2106 {
2107 	struct spdk_nvme_transport_id trid = {};
2108 	struct spdk_nvme_host_id hostid = {};
2109 	struct spdk_nvme_ctrlr *ctrlr;
2110 	struct nvme_ctrlr *nvme_ctrlr;
2111 	const int STRING_SIZE = 32;
2112 	const char *attached_names[STRING_SIZE];
2113 	struct nvme_bdev *bdev;
2114 	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
2115 	struct spdk_io_channel *ch1, *ch2;
2116 	struct nvme_bdev_channel *nbdev_ch1;
2117 	struct nvme_ctrlr_channel *ctrlr_ch1;
2118 	int rc;
2119 
2120 	/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
2121 	 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests
2122 	 * are submitted on thread 1. Both should succeed.
2123 	 */
2124 
2125 	ut_init_trid(&trid);
2126 
2127 	ctrlr = ut_attach_ctrlr(&trid, 1, false);
2128 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2129 
2130 	g_ut_attach_ctrlr_status = 0;
2131 	g_ut_attach_bdev_count = 1;
2132 
2133 	set_thread(1);
2134 
2135 	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
2136 			      attach_ctrlr_done, NULL, NULL);
2137 	CU_ASSERT(rc == 0);
2138 
2139 	spdk_delay_us(1000);
2140 	poll_threads();
2141 
2142 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2143 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2144 
2145 	bdev = nvme_ctrlr->namespaces[0]->bdev;
2146 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2147 
2148 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2149 	ut_bdev_io_set_buf(write_io);
2150 
2151 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2152 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2153 
2154 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2155 
2156 	set_thread(0);
2157 
2158 	ch1 = spdk_get_io_channel(bdev);
2159 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2160 	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2161 	ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
2162 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
2163 
2164 	set_thread(1);
2165 
2166 	ch2 = spdk_get_io_channel(bdev);
2167 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2168 
2169 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2170 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2171 
2172 	/* Aborting the already completed request should fail. */
2173 	write_io->internal.in_submit_request = true;
2174 	bdev_nvme_submit_request(ch1, write_io);
2175 	poll_threads();
2176 
2177 	CU_ASSERT(write_io->internal.in_submit_request == false);
2178 
2179 	abort_io->u.abort.bio_to_abort = write_io;
2180 	abort_io->internal.in_submit_request = true;
2181 
2182 	bdev_nvme_submit_request(ch1, abort_io);
2183 
2184 	poll_threads();
2185 
2186 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2187 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2188 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2189 
2190 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2191 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2192 
2193 	admin_io->internal.in_submit_request = true;
2194 	bdev_nvme_submit_request(ch1, admin_io);
2195 	spdk_delay_us(10000);
2196 	poll_threads();
2197 
2198 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2199 
2200 	abort_io->u.abort.bio_to_abort = admin_io;
2201 	abort_io->internal.in_submit_request = true;
2202 
2203 	bdev_nvme_submit_request(ch2, abort_io);
2204 
2205 	poll_threads();
2206 
2207 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2208 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2209 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2210 
2211 	/* Aborting the write request should succeed. */
2212 	write_io->internal.in_submit_request = true;
2213 	bdev_nvme_submit_request(ch1, write_io);
2214 
2215 	CU_ASSERT(write_io->internal.in_submit_request == true);
2216 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
2217 
2218 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2219 	abort_io->u.abort.bio_to_abort = write_io;
2220 	abort_io->internal.in_submit_request = true;
2221 
2222 	bdev_nvme_submit_request(ch1, abort_io);
2223 
2224 	spdk_delay_us(10000);
2225 	poll_threads();
2226 
2227 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2228 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2229 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2230 	CU_ASSERT(write_io->internal.in_submit_request == false);
2231 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2232 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
2233 
2234 	/* Aborting the admin request should succeed. */
2235 	admin_io->internal.in_submit_request = true;
2236 	bdev_nvme_submit_request(ch1, admin_io);
2237 
2238 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2239 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2240 
2241 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2242 	abort_io->u.abort.bio_to_abort = admin_io;
2243 	abort_io->internal.in_submit_request = true;
2244 
2245 	bdev_nvme_submit_request(ch2, abort_io);
2246 
2247 	spdk_delay_us(10000);
2248 	poll_threads();
2249 
2250 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2251 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2252 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2253 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2254 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2255 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2256 
2257 	set_thread(0);
2258 
2259 	spdk_put_io_channel(ch1);
2260 
2261 	set_thread(1);
2262 
2263 	spdk_put_io_channel(ch2);
2264 
2265 	poll_threads();
2266 
2267 	free(write_io);
2268 	free(admin_io);
2269 	free(abort_io);
2270 
2271 	set_thread(1);
2272 
2273 	rc = bdev_nvme_delete("nvme0", NULL);
2274 	CU_ASSERT(rc == 0);
2275 
2276 	poll_threads();
2277 
2278 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2279 }
2280 
2281 static void
2282 test_get_io_qpair(void)
2283 {
2284 	struct spdk_nvme_transport_id trid = {};
2285 	struct spdk_nvme_ctrlr ctrlr = {};
2286 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2287 	struct spdk_io_channel *ch;
2288 	struct nvme_ctrlr_channel *ctrlr_ch;
2289 	struct spdk_nvme_qpair *qpair;
2290 	int rc;
2291 
2292 	ut_init_trid(&trid);
2293 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2294 
2295 	set_thread(0);
2296 
2297 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
2298 	CU_ASSERT(rc == 0);
2299 
2300 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2301 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2302 
2303 	ch = spdk_get_io_channel(nvme_ctrlr);
2304 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2305 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2306 	CU_ASSERT(ctrlr_ch->qpair != NULL);
2307 
2308 	qpair = bdev_nvme_get_io_qpair(ch);
2309 	CU_ASSERT(qpair == ctrlr_ch->qpair);
2310 
2311 	spdk_put_io_channel(ch);
2312 
2313 	rc = bdev_nvme_delete("nvme0", NULL);
2314 	CU_ASSERT(rc == 0);
2315 
2316 	poll_threads();
2317 
2318 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2319 }
2320 
2321 /* Test a scenario that the bdev subsystem starts shutdown when there still exists
2322  * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a
2323  * test case to avoid regression for this scenario. spdk_bdev_unregister() calls
2324  * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly.
2325  */
static void
test_bdev_unregister(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev1, *bdev2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a controller exposing two namespaces so that two bdevs are created. */
	ctrlr = ut_attach_ctrlr(&trid, 2, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 2;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ns1 = nvme_ctrlr->namespaces[0];
	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);

	bdev1 = nvme_ns1->bdev;
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);

	nvme_ns2 = nvme_ctrlr->namespaces[1];
	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);

	bdev2 = nvme_ns2->bdev;
	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);

	/* Destruct both bdevs directly, as spdk_bdev_unregister() would do at
	 * shutdown, while the nvme_ctrlr still exists.
	 */
	bdev_nvme_destruct(&bdev1->disk);
	bdev_nvme_destruct(&bdev2->disk);

	poll_threads();

	/* Each namespace must have dropped its reference to the destroyed bdev. */
	CU_ASSERT(nvme_ns1->bdev == NULL);
	CU_ASSERT(nvme_ns2->bdev == NULL);

	/* Then tear down the nvme_ctrlr itself, mirroring module shutdown. */
	nvme_ctrlr->destruct = true;
	_nvme_ctrlr_destruct(nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2385 
2386 static void
2387 test_compare_ns(void)
2388 {
2389 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2390 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2391 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2392 
2393 	/* No IDs are defined. */
2394 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2395 
2396 	/* Only EUI64 are defined and not matched. */
2397 	nsdata1.eui64 = 0xABCDEF0123456789;
2398 	nsdata2.eui64 = 0xBBCDEF0123456789;
2399 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2400 
2401 	/* Only EUI64 are defined and matched. */
2402 	nsdata2.eui64 = 0xABCDEF0123456789;
2403 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2404 
2405 	/* Only NGUID are defined and not matched. */
2406 	nsdata1.eui64 = 0x0;
2407 	nsdata2.eui64 = 0x0;
2408 	nsdata1.nguid[0] = 0x12;
2409 	nsdata2.nguid[0] = 0x10;
2410 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2411 
2412 	/* Only NGUID are defined and matched. */
2413 	nsdata2.nguid[0] = 0x12;
2414 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2415 
2416 	/* Only UUID are defined and not matched. */
2417 	nsdata1.nguid[0] = 0x0;
2418 	nsdata2.nguid[0] = 0x0;
2419 	ns1.uuid.u.raw[0] = 0xAA;
2420 	ns2.uuid.u.raw[0] = 0xAB;
2421 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2422 
2423 	/* Only UUID are defined and matched. */
2424 	ns1.uuid.u.raw[0] = 0xAB;
2425 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2426 
2427 	/* All EUI64, NGUID, and UUID are defined and matched. */
2428 	nsdata1.eui64 = 0x123456789ABCDEF;
2429 	nsdata2.eui64 = 0x123456789ABCDEF;
2430 	nsdata1.nguid[15] = 0x34;
2431 	nsdata2.nguid[15] = 0x34;
2432 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2433 }
2434 
/* Verify that the initial ANA log page read during attach populates every
 * namespace's ana_state and that a bdev is created for each namespace,
 * regardless of its ANA state.
 */
static void
test_init_ana_log_page(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach with 5 namespaces; the third argument presumably enables ANA
	 * in the stub controller — see ut_attach_ctrlr.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 5, true);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	/* One namespace per possible ANA state. */
	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 5;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(10000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	/* All 5 namespaces must be populated with the ANA state set above and
	 * must have a bdev, even the inaccessible/persistent-loss ones.
	 */
	CU_ASSERT(nvme_ctrlr->num_ns == 5);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[4]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[4]->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->bdev != NULL);
	CU_ASSERT(nvme_ctrlr->namespaces[4]->bdev != NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2500 
/* Register the stub accel engine I/O device (g_accel_p) for the tests. */
static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}
2507 
/* Unregister the stub accel engine I/O device registered by init_accel(). */
static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}
2513 
2514 int
2515 main(int argc, const char **argv)
2516 {
2517 	CU_pSuite	suite = NULL;
2518 	unsigned int	num_failures;
2519 
2520 	CU_set_error_action(CUEA_ABORT);
2521 	CU_initialize_registry();
2522 
2523 	suite = CU_add_suite("nvme", NULL, NULL);
2524 
2525 	CU_ADD_TEST(suite, test_create_ctrlr);
2526 	CU_ADD_TEST(suite, test_reset_ctrlr);
2527 	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
2528 	CU_ADD_TEST(suite, test_failover_ctrlr);
2529 	CU_ADD_TEST(suite, test_pending_reset);
2530 	CU_ADD_TEST(suite, test_attach_ctrlr);
2531 	CU_ADD_TEST(suite, test_reconnect_qpair);
2532 	CU_ADD_TEST(suite, test_aer_cb);
2533 	CU_ADD_TEST(suite, test_submit_nvme_cmd);
2534 	CU_ADD_TEST(suite, test_remove_trid);
2535 	CU_ADD_TEST(suite, test_abort);
2536 	CU_ADD_TEST(suite, test_get_io_qpair);
2537 	CU_ADD_TEST(suite, test_bdev_unregister);
2538 	CU_ADD_TEST(suite, test_compare_ns);
2539 	CU_ADD_TEST(suite, test_init_ana_log_page);
2540 
2541 	CU_basic_set_mode(CU_BRM_VERBOSE);
2542 
2543 	allocate_threads(3);
2544 	set_thread(0);
2545 	bdev_nvme_library_init();
2546 	init_accel();
2547 
2548 	CU_basic_run_tests();
2549 
2550 	set_thread(0);
2551 	bdev_nvme_library_fini();
2552 	fini_accel();
2553 	free_threads();
2554 
2555 	num_failures = CU_get_number_of_failures();
2556 	CU_cleanup_registry();
2557 
2558 	return num_failures;
2559 }
2560