xref: /spdk/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c (revision 78ecd30d8e4650007c80a053011116b85f3f17ae)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 #include "spdk_cunit.h"
36 #include "spdk/thread.h"
37 #include "spdk/bdev_module.h"
38 #include "spdk/bdev_module.h"
39 
40 #include "common/lib/ut_multithread.c"
41 
42 #include "bdev/nvme/bdev_nvme.c"
43 #include "bdev/nvme/common.c"
44 
45 #include "unit/lib/json_mock.c"
46 
47 static void *g_accel_p = (void *)0xdeadbeaf;
48 
49 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
50 	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
51 	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
52 	     spdk_nvme_remove_cb remove_cb), NULL);
53 
54 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
55 		enum spdk_nvme_transport_type trtype));
56 
57 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
58 	    NULL);
59 
60 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
61 
62 DEFINE_STUB_V(spdk_nvme_ctrlr_get_default_ctrlr_opts, (struct spdk_nvme_ctrlr_opts *opts,
63 		size_t opts_size));
64 
65 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
66 		struct spdk_nvme_transport_id *trid), 0);
67 
68 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
69 		spdk_nvme_remove_cb remove_cb, void *remove_ctx));
70 
71 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
72 
73 DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
74 DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));
75 
/* Stub for the accel framework's channel getter: return a channel for the
 * fake accel io_device registered at g_accel_p by the test setup.
 */
struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}
81 
/* Stub: zero the caller's qpair options instead of filling in real defaults,
 * so code that later reads opts back does not see uninitialised memory.
 */
void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}
89 
90 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
91 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
92 
93 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
94 	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
95 
96 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
97 		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));
98 
99 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
100 		uint64_t timeout_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));
101 
102 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);
103 
104 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
105 		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
106 
107 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
108 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
109 		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
110 
111 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
112 		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
113 		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
114 
115 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
116 
117 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
118 
119 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
120 
121 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);
122 
123 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);
124 
125 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);
126 
127 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
128 	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);
129 
130 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);
131 
132 DEFINE_STUB(spdk_nvme_ns_get_ana_state, enum spdk_nvme_ana_state,
133 	    (const struct spdk_nvme_ns *ns), 0);
134 
135 DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
136 	    (const struct spdk_nvme_ns *ns), 0);
137 
138 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
139 		char *name, size_t *size), 0);
140 
141 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
142 	    (struct spdk_nvme_ns *ns), 0);
143 
144 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
145 	    (const struct spdk_nvme_ctrlr *ctrlr), 0);
146 
147 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
148 	    (struct spdk_nvme_ns *ns), 0);
149 
150 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
151 	    (struct spdk_nvme_ns *ns), 0);
152 
153 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
154 	    (struct spdk_nvme_ns *ns), 0);
155 
156 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
157 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
158 	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
159 	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);
160 
161 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
162 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
163 	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
164 	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
165 	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);
166 
167 DEFINE_STUB(spdk_nvme_zns_report_zones, int,
168 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
169 	     void *payload, uint32_t payload_size, uint64_t slba,
170 	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
171 	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
172 
173 DEFINE_STUB(spdk_nvme_zns_close_zone, int,
174 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
175 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
176 
177 DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
178 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
179 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
180 
181 DEFINE_STUB(spdk_nvme_zns_open_zone, int,
182 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
183 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
184 
185 DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
186 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
187 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
188 
189 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);
190 
191 DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
192 	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
193 	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
194 
195 DEFINE_STUB_V(spdk_bdev_module_finish_done, (void));
196 
197 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
198 
199 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);
200 
201 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));
202 
203 DEFINE_STUB_V(bdev_ocssd_populate_namespace, (struct nvme_ctrlr *nvme_ctrlr,
204 		struct nvme_ns *nvme_ns, struct nvme_async_probe_ctx *ctx));
205 
206 DEFINE_STUB_V(bdev_ocssd_depopulate_namespace, (struct nvme_ns *nvme_ns));
207 
208 DEFINE_STUB_V(bdev_ocssd_namespace_config_json, (struct spdk_json_write_ctx *w,
209 		struct nvme_ns *nvme_ns));
210 
211 DEFINE_STUB(bdev_ocssd_create_io_channel, int, (struct nvme_ctrlr_channel *ioch), 0);
212 
213 DEFINE_STUB_V(bdev_ocssd_destroy_io_channel, (struct nvme_ctrlr_channel *ioch));
214 
215 DEFINE_STUB(bdev_ocssd_init_ctrlr, int, (struct nvme_ctrlr *nvme_ctrlr), 0);
216 
217 DEFINE_STUB_V(bdev_ocssd_fini_ctrlr, (struct nvme_ctrlr *nvme_ctrlr));
218 
219 DEFINE_STUB_V(bdev_ocssd_handle_chunk_notification, (struct nvme_ctrlr *nvme_ctrlr));
220 
221 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
222 		struct iovec *iov,
223 		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
224 
225 
/* One in-flight NVMe command tracked by the fake qpair implementation.
 * Its prepared completion (cpl) is delivered when the test polls the qpair.
 */
struct ut_nvme_req {
	uint16_t			opc;	/* NVMe opcode submitted */
	spdk_nvme_cmd_cb		cb_fn;	/* completion callback */
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;	/* completion to report back */
	TAILQ_ENTRY(ut_nvme_req)	tailq;	/* link on qpair->outstanding_reqs */
};

/* Minimal stand-in for the library's opaque namespace object. */
struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;	/* 1-based namespace ID */
	bool				is_active;
	struct spdk_uuid		uuid;
};

/* Minimal stand-in for the library's opaque queue pair object. */
struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	bool				is_connected;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;	/* link on poll_group->qpairs */
	struct spdk_nvme_poll_group	*poll_group;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;	/* link on ctrlr->active_io_qpairs */
};

/* Minimal stand-in for the library's opaque controller object.
 * Namespace state lives in two parallel arrays indexed by (nsid - 1).
 */
struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;		/* num_ns fake namespaces */
	struct spdk_nvme_ns_data	*nsdata;	/* num_ns namespace data blobs */
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;	/* force spdk_nvme_ctrlr_reset() to fail */
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;	/* link on g_ut_init/attached_ctrlrs */
	struct spdk_nvme_ctrlr_opts	opts;
};

/* Minimal stand-in for the library's opaque poll group object. */
struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	qpairs;
};

/* Minimal stand-in for the library's opaque async probe context. */
struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

/* Controllers created by ut_attach_ctrlr() but not yet seen by a probe poll. */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
/* Controllers the probe machinery has "attached". */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;	/* returned by the spdk_bdev_register() stub */
285 
286 static void
287 ut_init_trid(struct spdk_nvme_transport_id *trid)
288 {
289 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
290 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
291 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
292 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
293 }
294 
295 static void
296 ut_init_trid2(struct spdk_nvme_transport_id *trid)
297 {
298 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
299 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
300 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
301 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
302 }
303 
304 static void
305 ut_init_trid3(struct spdk_nvme_transport_id *trid)
306 {
307 	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
308 	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
309 	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
310 	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
311 }
312 
/* Three-way comparison of two ints.
 *
 * Implemented as (a > b) - (a < b) rather than a - b: the subtraction is
 * undefined behavior on signed overflow for operands of opposite sign with
 * large magnitude (e.g. cmp_int(INT_MAX, -2)). Callers only test the sign.
 */
static int
cmp_int(int a, int b)
{
	return (a > b) - (a < b);
}
318 
319 int
320 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
321 			       const struct spdk_nvme_transport_id *trid2)
322 {
323 	int cmp;
324 
325 	/* We assume trtype is TCP for now. */
326 	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);
327 
328 	cmp = cmp_int(trid1->trtype, trid2->trtype);
329 	if (cmp) {
330 		return cmp;
331 	}
332 
333 	cmp = strcasecmp(trid1->traddr, trid2->traddr);
334 	if (cmp) {
335 		return cmp;
336 	}
337 
338 	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
339 	if (cmp) {
340 		return cmp;
341 	}
342 
343 	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
344 	if (cmp) {
345 		return cmp;
346 	}
347 
348 	cmp = strcmp(trid1->subnqn, trid2->subnqn);
349 	if (cmp) {
350 		return cmp;
351 	}
352 
353 	return 0;
354 }
355 
/* Create a fake controller for the given transport ID with num_ns namespaces
 * and queue it on g_ut_init_ctrlrs, where the probe stubs later "discover"
 * it. Returns NULL if a controller with the same trid is already pending or
 * on allocation failure. ut_detach_ctrlr() releases the allocations.
 */
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		/* Namespace IDs are 1-based; every namespace starts active. */
		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->nsdata[i].nsze = 1024;
		}
	}

	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}
408 
/* Free a controller previously moved to g_ut_attached_ctrlrs. All of its
 * I/O qpairs must already have been freed.
 */
static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}
419 
420 static int
421 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
422 		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
423 {
424 	struct ut_nvme_req *req;
425 
426 	req = calloc(1, sizeof(*req));
427 	if (req == NULL) {
428 		return -ENOMEM;
429 	}
430 
431 	req->opc = opc;
432 	req->cb_fn = cb_fn;
433 	req->cb_arg = cb_arg;
434 
435 	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
436 	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
437 
438 	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
439 	qpair->num_outstanding_reqs++;
440 
441 	return 0;
442 }
443 
/* Allocate a bdev_io of the given type with trailing room for the
 * nvme_bdev_io driver context, bound to nbdev's disk and channel ch.
 * The caller owns (and frees) the returned bdev_io.
 */
static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

/* Give the bdev_io a single 4 KiB iovec. The buffer address is a sentinel
 * value only; the request stubs never dereference it.
 */
static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}
468 
469 static void
470 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
471 {
472 	if (ctrlr->is_failed) {
473 		free(ctrlr);
474 		return;
475 	}
476 
477 	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);
478 
479 	if (probe_ctx->attach_cb) {
480 		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
481 	}
482 }
483 
/* Complete an async probe: move every pending controller whose trid matches
 * the probe context onto the attached list and invoke the attach callback.
 * Always consumes (frees) probe_ctx and reports success.
 */
int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

/* Build a probe context for the given target; the caller later passes it to
 * spdk_nvme_probe_poll_async(), which takes ownership. Returns NULL on a
 * NULL trid or allocation failure.
 */
struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	/* opts rides through cb_ctx; the const is deliberately dropped. */
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

/* Tear the fake controller down only if it was attached; always succeeds. */
int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}
534 
/* Simple accessors over the fake controller object. */
const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

/* Return the namespace for a 1-based nsid, or NULL when out of range. */
struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

/* A nsid outside [1, num_ns] is reported as inactive rather than an error. */
bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

/* Register stubs: always report zeroed CSTS/VS register values. */
union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}
586 
/* Allocate a fake I/O qpair and track it on the controller. The supplied
 * opts are ignored by the stub.
 */
struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

/* Mark the qpair connected; connecting twice is an error. */
int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;

	return 0;
}

/* Reconnect succeeds only while the owning controller is healthy. */
int
spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = qpair->ctrlr;

	if (ctrlr->is_failed) {
		return -ENXIO;
	}
	qpair->is_connected = true;

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	qpair->is_connected = false;
}

/* Release a qpair: detach it from its poll group (if any) and from the
 * controller. All submitted requests must have completed by now.
 */
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}
659 
/* Reset stub: fails when the test set ctrlr->fail_reset, otherwise clears
 * the failed state.
 */
int
spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		return -EIO;
	}

	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

/* Admin commands are queued on the fake admin qpair like any other request;
 * buf and len are ignored by the stub.
 */
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}
685 
/* Abort the outstanding request on qpair (adminq when qpair is NULL) whose
 * cb_arg matches cmd_cb_arg: rewrite its completion to "aborted by request"
 * and queue a successful ABORT command on the admin queue. Both completions
 * are delivered on subsequent polls. Returns -ENOENT when nothing matches.
 */
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	/* req is left NULL by the loop when no entry matched. */
	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

/* Admin completions go through the common fake qpair poll path. */
int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
735 
/* Namespace accessors over the fake ns objects. */
uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

/* Namespace data lives in the controller's parallel nsdata array,
 * indexed by the 1-based namespace ID.
 */
static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return &ns->uuid;
}
771 
/* I/O command stubs: each queues a fake request carrying the matching NVMe
 * opcode on the qpair; buffers, LBAs and flags are ignored. Completions are
 * delivered when the test polls the qpair.
 */
int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}
839 
/* Create a poll group, copying the caller's accel function table if given. */
struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->qpairs);

	return group;
}

/* A group still holding qpairs cannot be destroyed. */
int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

/* Drain the qpair: fire every queued request's callback with its prepared
 * completion. max_completions is ignored; everything outstanding completes.
 */
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	return num_completions;
}
890 
891 int64_t
892 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
893 		uint32_t completions_per_qpair,
894 		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
895 {
896 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
897 	int64_t local_completions = 0, error_reason = 0, num_completions = 0;
898 
899 	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);
900 
901 	if (disconnected_qpair_cb == NULL) {
902 		return -EINVAL;
903 	}
904 
905 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
906 		if (qpair->is_connected) {
907 			local_completions = spdk_nvme_qpair_process_completions(qpair,
908 					    completions_per_qpair);
909 			if (local_completions < 0 && error_reason == 0) {
910 				error_reason = local_completions;
911 			} else {
912 				num_completions += local_completions;
913 				assert(num_completions >= 0);
914 			}
915 		}
916 	}
917 
918 	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
919 		if (!qpair->is_connected) {
920 			disconnected_qpair_cb(qpair, group->ctx);
921 		}
922 	}
923 
924 	return error_reason ? error_reason : num_completions;
925 }
926 
/* Qpairs must be disconnected while moving in or out of a poll group. */
int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}
949 
/* Registration stub: the result is controlled by g_ut_register_bdev_status. */
int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	return g_ut_register_bdev_status;
}

/* Destruct the bdev synchronously; invoke cb_fn unless destruct() reported
 * that completion will arrive asynchronously (rc > 0).
 */
void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);
	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

/* Record the completion status directly on the bdev_io. */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

/* Map an NVMe status onto the generic bdev status, stash the raw NVMe status
 * for later inspection, then complete the bdev_io.
 */
void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

/* Hand the bdev_io a fake buffer and invoke the get-buf callback at once. */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}
1015 
/* Verify nvme_ctrlr create/delete: deletion is deferred to the reactor
 * threads, so the controller must stay findable until the threads are
 * polled, then disappear.
 */
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	/* Deletion has only been queued; the ctrlr is still registered. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1039 
/* Verify bdev_nvme_reset(): rejection while destructing (-EBUSY), rejection
 * while another reset is in progress (-EAGAIN), and the step-by-step progress
 * of a successful reset across two I/O-channel threads.
 */
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EAGAIN);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Each poll_thread_times() below advances the reset sequence by one
	 * message, so qpair teardown/re-creation order is deterministic.
	 */

	/* Thread 0 destroys its qpair first. */
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Thread 1 destroys its qpair; the ctrlr itself is not reset yet. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	/* The ctrlr is reset on thread 1, clearing its failed state. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr.is_failed == false);

	/* Qpairs are re-created, again starting from thread 0. */
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	/* Completion clears both the resetting flag and the trid failed flag. */
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1147 
/* Verify that deleting a ctrlr while it is being reset defers destruction
 * until the reset completes and all I/O channels are released, and that new
 * resets are rejected once destruction is pending.
 */
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();

	/* Reset completed but ctrlr is not still destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Additional polling called spdk_io_device_unregister() to ctrlr,
	 * However there are two channels and destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	/* Releasing both channels allows the deferred destruction to finish. */
	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1222 
/* Verify bdev_nvme_failover() with a single trid (reset in place) and with
 * two trids (path switch from trid1 to trid2), including the cases where a
 * reset or another failover is already in progress.
 */
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 3: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(&curr_trid->trid == nvme_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 5: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EAGAIN);

	/* Case 6: failover is in progress. */
	nvme_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 7: failover completes successfully. */
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == true);

	/* The active path switched from trid1 to trid2. */
	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(&next_trid->trid == nvme_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->failover_in_progress == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1361 
/* Attach completion callback shared by the tests: verify that the attach
 * result and bdev count match the globally configured expectations.
 */
static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}
1368 
/* Verify that a reset bdev_io submitted while another reset is in progress is
 * queued on the channel's pending_resets list and completed with the same
 * outcome (success or failure) as the in-progress reset.
 */
static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);

	/* Pre-set FAILED so a later SUCCESS proves the I/O was completed. */
	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the reset fails
	 * (ctrlr->fail_reset) while processing the first request. Hence both
	 * reset requests should fail.
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	ctrlr->fail_reset = true;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}
1489 
/* Verify bdev_nvme_create() attach outcomes: failed ctrlr (no nvme_ctrlr
 * created), ctrlr with no namespaces, ctrlr with one namespace and one
 * nvme_bdev, and ctrlr whose bdev registration fails.
 */
static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
	ctrlr = ut_attach_ctrlr(&trid, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 1);

	/* The created bdev name is reported back through attached_names. */
	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Ctrlr has one namespace but one nvme_ctrlr with no namespace is
	 * created because creating one nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_ctrlr->num_ns == 1);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Restore the global so later tests see a clean state. */
	g_ut_register_bdev_status = 0;
}
1617 
/* Verify that the poll group reconnects a disconnected qpair during polling,
 * and that reconnection does not happen while the ctrlr is failed.
 */
static void
test_reconnect_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;

	set_thread(0);

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctrlr_ch = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(ctrlr_ch->qpair != NULL);
	CU_ASSERT(ctrlr_ch->group != NULL);
	CU_ASSERT(ctrlr_ch->group->group != NULL);
	CU_ASSERT(ctrlr_ch->group->poller != NULL);

	/* Test if the disconnected qpair is reconnected. */
	ctrlr_ch->qpair->is_connected = false;

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->is_connected == true);

	/* If the ctrlr is failed, reconnecting qpair should fail too. */
	ctrlr_ch->qpair->is_connected = false;
	ctrlr.is_failed = true;

	poll_threads();

	CU_ASSERT(ctrlr_ch->qpair->is_connected == false);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1674 
/* Verify that a namespace-attribute-changed AER populates new namespaces,
 * depopulates removed ones, and propagates a namespace resize to the bdev.
 */
static void
test_aer_cb(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_bdev *bdev;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th
	 * namespaces are populated.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 4);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].is_active = false;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 3;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr->num_ns == 4);
	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == false);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);

	bdev = nvme_ctrlr->namespaces[3]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 1024);

	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
	 * change the size of the 4th namespace.
	 */
	ctrlr->ns[0].is_active = true;
	ctrlr->ns[2].is_active = false;
	ctrlr->nsdata[3].nsze = 2048;

	/* Build the NS attribute changed async event and deliver it directly. */
	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == false);
	CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);
	CU_ASSERT(bdev->disk.blockcnt == 2048);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1751 
/* Helper: submit one I/O of the given type and verify it stays outstanding on
 * the qpair until the threads are polled, then completes successfully.
 */
static void
ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			enum spdk_bdev_io_type io_type)
{
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* Request is asynchronous: still in flight until polled. */
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 1);

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1777 
/* Helper: submit an I/O type that is completed inline (no-op, e.g. flush) and
 * verify it finishes without ever being queued on the qpair.
 */
static void
ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		   enum spdk_bdev_io_type io_type)
{
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* Completed synchronously: already done before any polling. */
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1798 
/* Helper: submit a compare-and-write I/O and verify it is issued as a fused
 * pair (compare first, then write) that completes successfully.
 */
static void
ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct ut_nvme_req *req;
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair));

	/* Only compare and write now. */
	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	/* Fused operation: two requests are outstanding for one bdev_io. */
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 2);
	CU_ASSERT(bio->first_fused_submitted == true);

	/* First outstanding request is compare operation. */
	req = TAILQ_FIRST(&ctrlr_ch->qpair->outstanding_reqs);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}
1833 
/* Helper: submit an admin passthrough command and verify it is processed by
 * the admin poller on the ctrlr's thread (thread 1) and then completed back
 * on the submitting thread (thread 0).
 */
static void
ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			 struct spdk_nvme_ctrlr *ctrlr)
{
	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
	bdev_io->internal.in_submit_request = true;
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* Advance time so the periodic admin poller on thread 1 fires. */
	spdk_delay_us(10000);
	poll_thread_times(1, 1);

	/* Admin request done, but completion is delivered back to thread 0. */
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	poll_thread_times(0, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
}
1857 
/* Exercise bdev_nvme_submit_request() for every supported I/O type via the
 * ut_test_submit_* helpers: read/write/compare/unmap, flush (no-op),
 * compare-and-write (fused), and admin passthrough.
 */
static void
test_submit_nvme_cmd(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Ctrlr is created on thread 1; I/O is submitted from thread 0. */
	set_thread(1);

	ctrlr = ut_attach_ctrlr(&trid, 1);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);

	/* First a read without a buffer, which triggers the get_buf path. */
	bdev_io->u.bdev.iovs = NULL;

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);

	ut_bdev_io_set_buf(bdev_io);

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);

	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);

	ut_test_submit_fused_nvme_cmd(ch, bdev_io);

	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
1935 
/* Verify bdev_nvme_delete() with a specific trid: unknown trid (-ENXIO),
 * removing an unused alternative trid, removing the active trid (triggers
 * failover to the remaining path), and removing the last trid or passing
 * NULL (destroys the nvme_ctrlr).
 */
static void
test_remove_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_ctrlr_trid *ctrid;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	/* trid3 is not in the registered list. */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == -ENXIO);

	/* trid2 is not used, and simply removed. */
	rc = bdev_nvme_delete("nvme0", &trid2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0);
	}

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	/* trid1 is currently used and trid3 is an alternative path.
	 * If we remove trid1, path is changed to trid3.
	 */
	rc = bdev_nvme_delete("nvme0", &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid1) != 0);
	}
	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid3) == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* trid3 is the current and only path. If we remove trid3, the corresponding
	 * nvme_ctrlr is removed.
	 */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	/* If trid is not specified, nvme_ctrlr itself is removed. */
	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
2020 
2021 static void
2022 test_abort(void)
2023 {
2024 	struct spdk_nvme_transport_id trid = {};
2025 	struct spdk_nvme_host_id hostid = {};
2026 	struct spdk_nvme_ctrlr *ctrlr;
2027 	struct nvme_ctrlr *nvme_ctrlr;
2028 	const int STRING_SIZE = 32;
2029 	const char *attached_names[STRING_SIZE];
2030 	struct nvme_bdev *bdev;
2031 	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
2032 	struct spdk_io_channel *ch1, *ch2;
2033 	struct nvme_ctrlr_channel *ctrlr_ch1;
2034 	int rc;
2035 
2036 	/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
2037 	 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests
2038 	 * are submitted on thread 1. Both should succeed.
2039 	 */
2040 
2041 	ut_init_trid(&trid);
2042 
2043 	ctrlr = ut_attach_ctrlr(&trid, 1);
2044 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2045 
2046 	g_ut_attach_ctrlr_status = 0;
2047 	g_ut_attach_bdev_count = 1;
2048 
2049 	set_thread(1);
2050 
2051 	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
2052 			      attach_ctrlr_done, NULL, NULL);
2053 	CU_ASSERT(rc == 0);
2054 
2055 	spdk_delay_us(1000);
2056 	poll_threads();
2057 
2058 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2059 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2060 
2061 	bdev = nvme_ctrlr->namespaces[0]->bdev;
2062 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2063 
2064 	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2065 	ut_bdev_io_set_buf(write_io);
2066 
2067 	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2068 	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2069 
2070 	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2071 
2072 	set_thread(0);
2073 
2074 	ch1 = spdk_get_io_channel(nvme_ctrlr);
2075 	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2076 	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
2077 
2078 	set_thread(1);
2079 
2080 	ch2 = spdk_get_io_channel(nvme_ctrlr);
2081 	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2082 
2083 	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2084 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2085 
2086 	/* Aborting the already completed request should fail. */
2087 	write_io->internal.in_submit_request = true;
2088 	bdev_nvme_submit_request(ch1, write_io);
2089 	poll_threads();
2090 
2091 	CU_ASSERT(write_io->internal.in_submit_request == false);
2092 
2093 	abort_io->u.abort.bio_to_abort = write_io;
2094 	abort_io->internal.in_submit_request = true;
2095 
2096 	bdev_nvme_submit_request(ch1, abort_io);
2097 
2098 	poll_threads();
2099 
2100 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2101 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2102 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2103 
2104 	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2105 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2106 
2107 	admin_io->internal.in_submit_request = true;
2108 	bdev_nvme_submit_request(ch1, admin_io);
2109 	spdk_delay_us(10000);
2110 	poll_threads();
2111 
2112 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2113 
2114 	abort_io->u.abort.bio_to_abort = admin_io;
2115 	abort_io->internal.in_submit_request = true;
2116 
2117 	bdev_nvme_submit_request(ch2, abort_io);
2118 
2119 	poll_threads();
2120 
2121 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2122 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2123 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2124 
2125 	/* Aborting the write request should succeed. */
2126 	write_io->internal.in_submit_request = true;
2127 	bdev_nvme_submit_request(ch1, write_io);
2128 
2129 	CU_ASSERT(write_io->internal.in_submit_request == true);
2130 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
2131 
2132 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2133 	abort_io->u.abort.bio_to_abort = write_io;
2134 	abort_io->internal.in_submit_request = true;
2135 
2136 	bdev_nvme_submit_request(ch1, abort_io);
2137 
2138 	spdk_delay_us(10000);
2139 	poll_threads();
2140 
2141 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2142 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2143 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2144 	CU_ASSERT(write_io->internal.in_submit_request == false);
2145 	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2146 	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
2147 
2148 	/* Aborting the admin request should succeed. */
2149 	admin_io->internal.in_submit_request = true;
2150 	bdev_nvme_submit_request(ch1, admin_io);
2151 
2152 	CU_ASSERT(admin_io->internal.in_submit_request == true);
2153 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2154 
2155 	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2156 	abort_io->u.abort.bio_to_abort = admin_io;
2157 	abort_io->internal.in_submit_request = true;
2158 
2159 	bdev_nvme_submit_request(ch2, abort_io);
2160 
2161 	spdk_delay_us(10000);
2162 	poll_threads();
2163 
2164 	CU_ASSERT(abort_io->internal.in_submit_request == false);
2165 	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2166 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2167 	CU_ASSERT(admin_io->internal.in_submit_request == false);
2168 	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2169 	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2170 
2171 	set_thread(0);
2172 
2173 	spdk_put_io_channel(ch1);
2174 
2175 	set_thread(1);
2176 
2177 	spdk_put_io_channel(ch2);
2178 
2179 	poll_threads();
2180 
2181 	free(write_io);
2182 	free(admin_io);
2183 	free(abort_io);
2184 
2185 	set_thread(1);
2186 
2187 	rc = bdev_nvme_delete("nvme0", NULL);
2188 	CU_ASSERT(rc == 0);
2189 
2190 	poll_threads();
2191 
2192 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2193 }
2194 
2195 static void
2196 test_get_io_qpair(void)
2197 {
2198 	struct spdk_nvme_transport_id trid = {};
2199 	struct spdk_nvme_ctrlr ctrlr = {};
2200 	struct nvme_ctrlr *nvme_ctrlr = NULL;
2201 	struct spdk_io_channel *ch;
2202 	struct nvme_ctrlr_channel *ctrlr_ch;
2203 	struct spdk_nvme_qpair *qpair;
2204 	int rc;
2205 
2206 	ut_init_trid(&trid);
2207 	TAILQ_INIT(&ctrlr.active_io_qpairs);
2208 
2209 	set_thread(0);
2210 
2211 	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
2212 	CU_ASSERT(rc == 0);
2213 
2214 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2215 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2216 
2217 	ch = spdk_get_io_channel(nvme_ctrlr);
2218 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2219 	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2220 	CU_ASSERT(ctrlr_ch->qpair != NULL);
2221 
2222 	qpair = bdev_nvme_get_io_qpair(ch);
2223 	CU_ASSERT(qpair == ctrlr_ch->qpair);
2224 
2225 	spdk_put_io_channel(ch);
2226 
2227 	rc = bdev_nvme_delete("nvme0", NULL);
2228 	CU_ASSERT(rc == 0);
2229 
2230 	poll_threads();
2231 
2232 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2233 }
2234 
/* Test the scenario in which the bdev subsystem starts shutting down while NVMe
 * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first.
 * This test case guards against regressions in that path. spdk_bdev_unregister()
 * ultimately calls bdev_nvme_destruct(), so call bdev_nvme_destruct() directly.
 */
2240 static void
2241 test_bdev_unregister(void)
2242 {
2243 	struct spdk_nvme_transport_id trid = {};
2244 	struct spdk_nvme_host_id hostid = {};
2245 	struct spdk_nvme_ctrlr *ctrlr;
2246 	struct nvme_ctrlr *nvme_ctrlr;
2247 	struct nvme_ns *nvme_ns1, *nvme_ns2;
2248 	const int STRING_SIZE = 32;
2249 	const char *attached_names[STRING_SIZE];
2250 	struct nvme_bdev *bdev1, *bdev2;
2251 	int rc;
2252 
2253 	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2254 	ut_init_trid(&trid);
2255 
2256 	ctrlr = ut_attach_ctrlr(&trid, 2);
2257 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2258 
2259 	g_ut_attach_ctrlr_status = 0;
2260 	g_ut_attach_bdev_count = 2;
2261 
2262 	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
2263 			      attach_ctrlr_done, NULL, NULL);
2264 	CU_ASSERT(rc == 0);
2265 
2266 	spdk_delay_us(1000);
2267 	poll_threads();
2268 
2269 	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2270 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2271 
2272 	nvme_ns1 = nvme_ctrlr->namespaces[0];
2273 	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2274 
2275 	bdev1 = nvme_ns1->bdev;
2276 	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2277 
2278 	nvme_ns2 = nvme_ctrlr->namespaces[1];
2279 	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2280 
2281 	bdev2 = nvme_ns2->bdev;
2282 	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2283 
2284 	bdev_nvme_destruct(&bdev1->disk);
2285 	bdev_nvme_destruct(&bdev2->disk);
2286 
2287 	poll_threads();
2288 
2289 	CU_ASSERT(nvme_ns1->bdev == NULL);
2290 	CU_ASSERT(nvme_ns2->bdev == NULL);
2291 
2292 	nvme_ctrlr->destruct = true;
2293 	_nvme_ctrlr_destruct(nvme_ctrlr);
2294 
2295 	poll_threads();
2296 
2297 	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2298 }
2299 
2300 static void
2301 test_compare_ns(void)
2302 {
2303 	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2304 	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2305 	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2306 
2307 	/* No IDs are defined. */
2308 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2309 
2310 	/* Only EUI64 are defined and not matched. */
2311 	nsdata1.eui64 = 0xABCDEF0123456789;
2312 	nsdata2.eui64 = 0xBBCDEF0123456789;
2313 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2314 
2315 	/* Only EUI64 are defined and matched. */
2316 	nsdata2.eui64 = 0xABCDEF0123456789;
2317 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2318 
2319 	/* Only NGUID are defined and not matched. */
2320 	nsdata1.eui64 = 0x0;
2321 	nsdata2.eui64 = 0x0;
2322 	nsdata1.nguid[0] = 0x12;
2323 	nsdata2.nguid[0] = 0x10;
2324 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2325 
2326 	/* Only NGUID are defined and matched. */
2327 	nsdata2.nguid[0] = 0x12;
2328 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2329 
2330 	/* Only UUID are defined and not matched. */
2331 	nsdata1.nguid[0] = 0x0;
2332 	nsdata2.nguid[0] = 0x0;
2333 	ns1.uuid.u.raw[0] = 0xAA;
2334 	ns2.uuid.u.raw[0] = 0xAB;
2335 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2336 
2337 	/* Only UUID are defined and matched. */
2338 	ns1.uuid.u.raw[0] = 0xAB;
2339 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2340 
2341 	/* All EUI64, NGUID, and UUID are defined and matched. */
2342 	nsdata1.eui64 = 0x123456789ABCDEF;
2343 	nsdata2.eui64 = 0x123456789ABCDEF;
2344 	nsdata1.nguid[15] = 0x34;
2345 	nsdata2.nguid[15] = 0x34;
2346 	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2347 }
2348 
/* Register the mock accel engine I/O device so channels can be created on it
 * during the tests; paired with fini_accel().
 */
static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}
2355 
/* Unregister the mock accel engine I/O device registered by init_accel(). */
static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}
2361 
/* Test entry point: register all test cases with CUnit, set up the mock
 * threading environment and the bdev_nvme module, run the suite, tear
 * everything down, and return the number of failed assertions.
 */
int
main(int argc, const char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	/* Abort immediately on any CUnit framework error. */
	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);

	CU_basic_set_mode(CU_BRM_VERBOSE);

	/* The tests use up to three mock threads (thread indexes 0-2). */
	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	CU_basic_run_tests();

	/* Finish on thread 0, mirroring the init sequence above. */
	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}
2407