/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

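/* Unlike the plain DEFINE_STUB stubs above, this mock uses the
 * DEFINE_RETURN_MOCK/HANDLE_RETURN_MOCK pattern: a test assigns the generated
 * ut_spdk_nvme_ctrlr_get_memory_domains control variable to choose both the
 * return value and how many fake domain pointers are filled in. A rough usage
 * sketch (mirroring how tests later in this file drive it):
 *
 *   MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
 *   ... exercise code that queries memory domains ...
 *   MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
 */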
DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

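/* The accel framework is faked with g_accel_p as an io_device cookie. The test
 * setup elsewhere in this file is expected to register it, roughly:
 *
 *   spdk_io_device_register(g_accel_p, accel_channel_create,
 *                           accel_channel_destroy, sizeof(int), "accel_p");
 *
 * so that spdk_accel_get_io_channel() below can return a usable channel.
 */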
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));

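/* The structs below shadow the opaque types of the real NVMe driver. Because
 * bdev_nvme.c is #include'd into this file and compiled against these
 * definitions, they only need the fields the tests touch: fake qpairs queue
 * ut_nvme_req entries, and the fake ctrlr carries its namespaces, admin qpair,
 * identify data, and failure-injection flags (fail_reset, is_removed, etc.).
 */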
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint32_t			id;
	bool				is_active;
	struct spdk_uuid		*uuid;
	enum spdk_nvme_ana_state	ana_state;
	enum spdk_nvme_csi		csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	uint8_t				failure_reason;
	bool				is_connected;
	bool				in_completion_context;
	bool				delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;
	struct spdk_nvme_poll_group	*poll_group;
	void				*poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;
	struct spdk_nvme_ns		*ns;
	struct spdk_nvme_ns_data	*nsdata;
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;
	bool				is_removed;
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair)	disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

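/* The three helpers below build TCP transport IDs that share one subsystem NQN
 * and differ only in target address (.8/.9/.10). This models multiple paths to
 * the same NVMe-oF subsystem: a test typically creates a ctrlr with trid1 and
 * then adds trid2/trid3 via bdev_nvme_add_secondary_trid() as alternate paths
 * for the failover tests.
 */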
static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

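/* ut_attach_ctrlr() only allocates a fake ctrlr and parks it on
 * g_ut_init_ctrlrs; nothing is attached to the bdev module yet. The attach
 * happens when probing polls: spdk_nvme_probe_poll_async() below matches the
 * trid, moves the ctrlr to g_ut_attached_ctrlrs and fires the attach callback.
 * A typical test sequence is roughly:
 *
 *   ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
 *   rc = bdev_nvme_create(&trid, "nvme0", ...);
 *   spdk_delay_us(1000);
 *   poll_threads();
 */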
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

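/* Fake I/O submission: requests are never sent anywhere. They are queued on the
 * qpair's outstanding_reqs list with a default success completion and finish
 * only when spdk_nvme_qpair_process_completions() is polled. A test can
 * therefore look up a request in flight (ut_get_outstanding_nvme_request) and
 * rewrite req->cpl.status to inject an error before letting it complete.
 */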
static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
	spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

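/* Freeing a qpair from within its own completion context must be deferred: the
 * delete_after_completion_context flag set here is checked at the end of
 * spdk_nvme_qpair_process_completions() below, mirroring how the real driver
 * avoids freeing a qpair while still iterating its request list.
 */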
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

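/* Fake ANA log page layout: a header whose num_ana_group_desc equals the
 * namespace count, followed by one descriptor per active namespace. Each fake
 * descriptor lists exactly one NSID (group id == NSID), which is why
 * UT_ANA_DESC_SIZE is the descriptor struct plus a single uint32_t NSID entry.
 */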
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

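/* Fake abort: find the outstanding request whose cb_arg matches cmd_cb_arg,
 * rewrite its completion status to "aborted by request", and queue an ABORT
 * command of our own on the admin qpair. Both the victim and the abort then
 * complete the next time the respective qpairs are polled, so a test can
 * observe both completions.
 */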
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

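/* This is where fake requests actually complete: every entry queued by
 * ut_submit_nvme_request() is drained and its callback is invoked with the cpl
 * the test may have doctored in the meantime. The in_completion_context flag
 * also makes the deferred-free path in spdk_nvme_ctrlr_free_io_qpair() above
 * testable.
 */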
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

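/* The tests below run on the mock multithreaded scheduler pulled in from
 * ut_multithread.c: set_thread() selects the current thread, poll_threads()
 * runs pending messages and pollers, and spdk_delay_us() advances mock time.
 * Deleting a ctrlr is asynchronous, hence the recurring idiom:
 *
 *   rc = bdev_nvme_delete("nvme0", &g_any_path);
 *   poll_threads();
 *   spdk_delay_us(1000);
 *   poll_threads();
 *
 * where the delay lets time-based pollers fire before the final poll finishes
 * the destruct.
 */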
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, bool success)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(success == false);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->reset_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->reset_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->reset_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->reset_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing the ctrlr while it is being reset; the destruct will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed, but the ctrlr has not been destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels open, so the destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
 * trid1 was disconnected, and resetting the ctrlr failed repeatedly before failover
 * from trid1 to trid2 started. While the failed reset was being processed, trid3 was
 * added. trid1 should have remained active, i.e., the head of the list, until the
 * failover completed. However, trid3 was inserted at the head of the list by mistake.
 *
 * I/O qpairs have a shorter polling period than the admin qpair. When a connection
 * is lost, an I/O qpair may detect the error earlier than the admin qpair. An I/O
 * qpair error triggers a ctrlr reset, and an admin qpair error triggers a ctrlr
 * failover. Hence reset may be executed repeatedly before failover is executed,
 * so this bug is real.
 *
 * The following test verifies the fix.
 */
1700  static void
1701  test_race_between_failover_and_add_secondary_trid(void)
1702  {
1703  	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
1704  	struct spdk_nvme_ctrlr ctrlr = {};
1705  	struct nvme_ctrlr *nvme_ctrlr = NULL;
1706  	struct nvme_path_id *path_id1, *path_id2, *path_id3;
1707  	struct spdk_io_channel *ch1, *ch2;
1708  	int rc;
1709  
1710  	ut_init_trid(&trid1);
1711  	ut_init_trid2(&trid2);
1712  	ut_init_trid3(&trid3);
1713  	TAILQ_INIT(&ctrlr.active_io_qpairs);
1714  
1715  	set_thread(0);
1716  
1717  	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
1718  	CU_ASSERT(rc == 0);
1719  
1720  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1721  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1722  
1723  	ch1 = spdk_get_io_channel(nvme_ctrlr);
1724  	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1725  
1726  	set_thread(1);
1727  
1728  	ch2 = spdk_get_io_channel(nvme_ctrlr);
1729  	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1730  
1731  	set_thread(0);
1732  
1733  	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
1734  	CU_ASSERT(rc == 0);
1735  
1736  	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
1737  	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
1738  	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1739  	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1740  	path_id2 = TAILQ_NEXT(path_id1, link);
1741  	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
1742  	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1743  
1744  	ctrlr.fail_reset = true;
1745  
1746  	rc = bdev_nvme_reset(nvme_ctrlr);
1747  	CU_ASSERT(rc == 0);
1748  
1749  	poll_threads();
1750  
1751  	CU_ASSERT(path_id1->is_failed == true);
1752  	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1753  
1754  	rc = bdev_nvme_reset(nvme_ctrlr);
1755  	CU_ASSERT(rc == 0);
1756  
1757  	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
1758  	CU_ASSERT(rc == 0);
1759  
1760  	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
1761  	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
1762  	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
1763  	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
1764  	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
1765  	path_id3 = TAILQ_NEXT(path_id2, link);
1766  	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
1767  	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);
1768  
1769  	poll_threads();
1770  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1771  	poll_threads();
1772  
1773  	spdk_put_io_channel(ch1);
1774  
1775  	set_thread(1);
1776  
1777  	spdk_put_io_channel(ch2);
1778  
1779  	poll_threads();
1780  
1781  	set_thread(0);
1782  
1783  	rc = bdev_nvme_delete("nvme0", &g_any_path);
1784  	CU_ASSERT(rc == 0);
1785  
1786  	poll_threads();
1787  	spdk_delay_us(1000);
1788  	poll_threads();
1789  
1790  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1791  }
1792  
1793  static void
1794  attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
1795  {
1796  	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
1797  	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
1798  }
1799  
1800  static void
1801  test_pending_reset(void)
1802  {
1803  	struct spdk_nvme_transport_id trid = {};
1804  	struct spdk_nvme_ctrlr *ctrlr;
1805  	struct nvme_ctrlr *nvme_ctrlr = NULL;
1806  	const int STRING_SIZE = 32;
1807  	const char *attached_names[STRING_SIZE];
1808  	struct nvme_bdev *bdev;
1809  	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
1810  	struct spdk_io_channel *ch1, *ch2;
1811  	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
1812  	struct nvme_io_path *io_path1, *io_path2;
1813  	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
1814  	int rc;
1815  
1816  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1817  	ut_init_trid(&trid);
1818  
1819  	set_thread(0);
1820  
1821  	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
1822  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1823  
1824  	g_ut_attach_ctrlr_status = 0;
1825  	g_ut_attach_bdev_count = 1;
1826  
1827  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1828  			      attach_ctrlr_done, NULL, NULL, NULL, false);
1829  	CU_ASSERT(rc == 0);
1830  
1831  	spdk_delay_us(1000);
1832  	poll_threads();
1833  
1834  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1835  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1836  
1837  	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
1838  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
1839  
1840  	ch1 = spdk_get_io_channel(bdev);
1841  	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
1842  
1843  	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
1844  	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
1845  	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
1846  	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
1847  	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
1848  
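	/* Allocate a reset bdev_io and pre-set its status to FAILED so that a later
	 * SUCCESS proves the reset path actually completed it.
	 */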
1849  	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
1850  	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1851  
1852  	set_thread(1);
1853  
1854  	ch2 = spdk_get_io_channel(bdev);
1855  	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
1856  
1857  	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
1858  	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
1859  	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
1860  	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
1861  	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
1862  
1863  	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
1864  	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1865  
1866  	/* The first reset request is submitted on thread 1, and the second reset request
1867  	 * is submitted on thread 0 while processing the first request.
1868  	 */
1869  	bdev_nvme_submit_request(ch2, first_bdev_io);
1870  	CU_ASSERT(nvme_ctrlr->resetting == true);
1871  	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1872  
1873  	set_thread(0);
1874  
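	/* A reset is already in progress, so the second reset request is queued on
	 * this channel's pending_resets list.
	 */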
1875  	bdev_nvme_submit_request(ch1, second_bdev_io);
1876  	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1877  
1878  	poll_threads();
1879  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1880  	poll_threads();
1881  
1882  	CU_ASSERT(nvme_ctrlr->resetting == false);
1883  	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1884  	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1885  
1886  	/* The first reset request is submitted on thread 1, and the second reset request
1887  	 * is submitted on thread 0 while processing the first request.
1888  	 *
1889   * The difference from the above scenario is that the reset fails while processing
1890   * the first request (as if the controller were removed). Hence both resets should fail.
1891  	 */
1892  	set_thread(1);
1893  
1894  	bdev_nvme_submit_request(ch2, first_bdev_io);
1895  	CU_ASSERT(nvme_ctrlr->resetting == true);
1896  	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
1897  
1898  	set_thread(0);
1899  
1900  	bdev_nvme_submit_request(ch1, second_bdev_io);
1901  	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
1902  
1903  	ctrlr->fail_reset = true;
1904  
1905  	poll_threads();
1906  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
1907  	poll_threads();
1908  
1909  	CU_ASSERT(nvme_ctrlr->resetting == false);
1910  	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1911  	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1912  
1913  	spdk_put_io_channel(ch1);
1914  
1915  	set_thread(1);
1916  
1917  	spdk_put_io_channel(ch2);
1918  
1919  	poll_threads();
1920  
1921  	set_thread(0);
1922  
1923  	rc = bdev_nvme_delete("nvme0", &g_any_path);
1924  	CU_ASSERT(rc == 0);
1925  
1926  	poll_threads();
1927  	spdk_delay_us(1000);
1928  	poll_threads();
1929  
1930  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1931  
1932  	free(first_bdev_io);
1933  	free(second_bdev_io);
1934  }
1935  
1936  static void
1937  test_attach_ctrlr(void)
1938  {
1939  	struct spdk_nvme_transport_id trid = {};
1940  	struct spdk_nvme_ctrlr *ctrlr;
1941  	struct nvme_ctrlr *nvme_ctrlr;
1942  	const int STRING_SIZE = 32;
1943  	const char *attached_names[STRING_SIZE];
1944  	struct nvme_bdev *nbdev;
1945  	int rc;
1946  
1947  	set_thread(0);
1948  
1949  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
1950  	ut_init_trid(&trid);
1951  
1952  	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is removed
1953  	 * by probe polling.
1954  	 */
1955  	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1956  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1957  
1958  	ctrlr->is_failed = true;
1959  	g_ut_attach_ctrlr_status = -EIO;
1960  	g_ut_attach_bdev_count = 0;
1961  
1962  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1963  			      attach_ctrlr_done, NULL, NULL, NULL, false);
1964  	CU_ASSERT(rc == 0);
1965  
1966  	spdk_delay_us(1000);
1967  	poll_threads();
1968  
1969  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1970  
1971  	/* If the ctrlr has no namespaces, an nvme_ctrlr with no namespaces is created. */
1972  	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
1973  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
1974  
1975  	g_ut_attach_ctrlr_status = 0;
1976  
1977  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
1978  			      attach_ctrlr_done, NULL, NULL, NULL, false);
1979  	CU_ASSERT(rc == 0);
1980  
1981  	spdk_delay_us(1000);
1982  	poll_threads();
1983  
1984  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
1985  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
1986  	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
1987  
1988  	rc = bdev_nvme_delete("nvme0", &g_any_path);
1989  	CU_ASSERT(rc == 0);
1990  
1991  	poll_threads();
1992  	spdk_delay_us(1000);
1993  	poll_threads();
1994  
1995  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
1996  
1997  	/* If the ctrlr has one namespace, an nvme_ctrlr with one namespace and
1998  	 * one nvme_bdev is created.
1999  	 */
2000  	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2001  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2002  
2003  	g_ut_attach_bdev_count = 1;
2004  
2005  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2006  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2007  	CU_ASSERT(rc == 0);
2008  
2009  	spdk_delay_us(1000);
2010  	poll_threads();
2011  
2012  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2013  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2014  	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2015  
2016  	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
2017  	attached_names[0] = NULL;
2018  
2019  	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2020  	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
2021  	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2022  
2023  	rc = bdev_nvme_delete("nvme0", &g_any_path);
2024  	CU_ASSERT(rc == 0);
2025  
2026  	poll_threads();
2027  	spdk_delay_us(1000);
2028  	poll_threads();
2029  
2030  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2031  
2032  	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespace is
2033  	 * created because creating the nvme_bdev failed.
2034  	 */
2035  	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2036  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2037  
2038  	g_ut_register_bdev_status = -EINVAL;
2039  	g_ut_attach_bdev_count = 0;
2040  
2041  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2042  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2043  	CU_ASSERT(rc == 0);
2044  
2045  	spdk_delay_us(1000);
2046  	poll_threads();
2047  
2048  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2049  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2050  	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
2051  
2052  	CU_ASSERT(attached_names[0] == NULL);
2053  
2054  	rc = bdev_nvme_delete("nvme0", &g_any_path);
2055  	CU_ASSERT(rc == 0);
2056  
2057  	poll_threads();
2058  	spdk_delay_us(1000);
2059  	poll_threads();
2060  
2061  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2062  
2063  	g_ut_register_bdev_status = 0;
2064  }
2065  
2066  static void
2067  test_aer_cb(void)
2068  {
2069  	struct spdk_nvme_transport_id trid = {};
2070  	struct spdk_nvme_ctrlr *ctrlr;
2071  	struct nvme_ctrlr *nvme_ctrlr;
2072  	struct nvme_bdev *bdev;
2073  	const int STRING_SIZE = 32;
2074  	const char *attached_names[STRING_SIZE];
2075  	union spdk_nvme_async_event_completion event = {};
2076  	struct spdk_nvme_cpl cpl = {};
2077  	int rc;
2078  
2079  	set_thread(0);
2080  
2081  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2082  	ut_init_trid(&trid);
2083  
2084  	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd, and
2085  	 * 4th namespaces are populated.
2086  	 */
2087  	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
2088  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2089  
2090  	ctrlr->ns[0].is_active = false;
2091  
2092  	g_ut_attach_ctrlr_status = 0;
2093  	g_ut_attach_bdev_count = 3;
2094  
2095  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2096  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2097  	CU_ASSERT(rc == 0);
2098  
2099  	spdk_delay_us(1000);
2100  	poll_threads();
2101  
2102  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2103  	poll_threads();
2104  
2105  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2106  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2107  
2108  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
2109  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2110  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2111  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2112  
2113  	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
2114  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2115  	CU_ASSERT(bdev->disk.blockcnt == 1024);
2116  
2117  	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
2118  	 * change the size of the 4th namespace.
2119  	 */
2120  	ctrlr->ns[0].is_active = true;
2121  	ctrlr->ns[2].is_active = false;
2122  	ctrlr->nsdata[3].nsze = 2048;
2123  
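	/* Deliver a Namespace Attribute Changed notice to trigger namespace re-population. */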
2124  	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2125  	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
2126  	cpl.cdw0 = event.raw;
2127  
2128  	aer_cb(nvme_ctrlr, &cpl);
2129  
2130  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2131  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2132  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
2133  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2134  	CU_ASSERT(bdev->disk.blockcnt == 2048);
2135  
2136  	/* Change ANA state of active namespaces. */
2137  	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2138  	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2139  	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2140  
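	/* Deliver an ANA Change notice so that the ANA states are read again. */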
2141  	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
2142  	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
2143  	cpl.cdw0 = event.raw;
2144  
2145  	aer_cb(nvme_ctrlr, &cpl);
2146  
2147  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2148  	poll_threads();
2149  
2150  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2151  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2152  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2153  
2154  	rc = bdev_nvme_delete("nvme0", &g_any_path);
2155  	CU_ASSERT(rc == 0);
2156  
2157  	poll_threads();
2158  	spdk_delay_us(1000);
2159  	poll_threads();
2160  
2161  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2162  }
2163  
2164  static void
2165  ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2166  			enum spdk_bdev_io_type io_type)
2167  {
2168  	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2169  	struct nvme_io_path *io_path;
2170  	struct spdk_nvme_qpair *qpair;
2171  
2172  	io_path = bdev_nvme_find_io_path(nbdev_ch);
2173  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2174  	qpair = io_path->qpair->qpair;
2175  	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2176  
2177  	bdev_io->type = io_type;
2178  	bdev_io->internal.in_submit_request = true;
2179  
2180  	bdev_nvme_submit_request(ch, bdev_io);
2181  
2182  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2183  	CU_ASSERT(qpair->num_outstanding_reqs == 1);
2184  
2185  	poll_threads();
2186  
2187  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2188  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2189  	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2190  }
2191  
2192  static void
2193  ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2194  		   enum spdk_bdev_io_type io_type)
2195  {
2196  	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2197  	struct nvme_io_path *io_path;
2198  	struct spdk_nvme_qpair *qpair;
2199  
2200  	io_path = bdev_nvme_find_io_path(nbdev_ch);
2201  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2202  	qpair = io_path->qpair->qpair;
2203  	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2204  
2205  	bdev_io->type = io_type;
2206  	bdev_io->internal.in_submit_request = true;
2207  
2208  	bdev_nvme_submit_request(ch, bdev_io);
2209  
2210  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2211  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2212  	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2213  }
2214  
2215  static void
2216  ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
2217  {
2218  	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
2219  	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
2220  	struct ut_nvme_req *req;
2221  	struct nvme_io_path *io_path;
2222  	struct spdk_nvme_qpair *qpair;
2223  
2224  	io_path = bdev_nvme_find_io_path(nbdev_ch);
2225  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
2226  	qpair = io_path->qpair->qpair;
2227  	SPDK_CU_ASSERT_FATAL(qpair != NULL);
2228  
2229  	/* Only compare-and-write is tested for now. */
2230  	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
2231  	bdev_io->internal.in_submit_request = true;
2232  
2233  	bdev_nvme_submit_request(ch, bdev_io);
2234  
2235  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2236  	CU_ASSERT(qpair->num_outstanding_reqs == 2);
2237  	CU_ASSERT(bio->first_fused_submitted == true);
2238  
2239  	/* The first outstanding request is the compare operation. */
2240  	req = TAILQ_FIRST(&qpair->outstanding_reqs);
2241  	SPDK_CU_ASSERT_FATAL(req != NULL);
2242  	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
2243  	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
2244  
2245  	poll_threads();
2246  
2247  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2248  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2249  	CU_ASSERT(qpair->num_outstanding_reqs == 0);
2250  }
2251  
2252  static void
2253  ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
2254  			 struct spdk_nvme_ctrlr *ctrlr)
2255  {
2256  	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
2257  	bdev_io->internal.in_submit_request = true;
2258  	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2259  
2260  	bdev_nvme_submit_request(ch, bdev_io);
2261  
2262  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2263  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2264  
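	/* The admin request is completed on thread 1, where the ctrlr is polled. */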
2265  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2266  	poll_thread_times(1, 1);
2267  
2268  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
2269  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2270  
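	/* The completion callback is then delivered on thread 0, which submitted the request. */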
2271  	poll_thread_times(0, 1);
2272  
2273  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
2274  }
2275  
2276  static void
2277  test_submit_nvme_cmd(void)
2278  {
2279  	struct spdk_nvme_transport_id trid = {};
2280  	struct spdk_nvme_ctrlr *ctrlr;
2281  	struct nvme_ctrlr *nvme_ctrlr;
2282  	const int STRING_SIZE = 32;
2283  	const char *attached_names[STRING_SIZE];
2284  	struct nvme_bdev *bdev;
2285  	struct spdk_bdev_io *bdev_io;
2286  	struct spdk_io_channel *ch;
2287  	struct spdk_bdev_ext_io_opts ext_io_opts = {};
2288  	int rc;
2289  
2290  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2291  	ut_init_trid(&trid);
2292  
2293  	set_thread(1);
2294  
2295  	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2296  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2297  
2298  	g_ut_attach_ctrlr_status = 0;
2299  	g_ut_attach_bdev_count = 1;
2300  
2301  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2302  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2303  	CU_ASSERT(rc == 0);
2304  
2305  	spdk_delay_us(1000);
2306  	poll_threads();
2307  
2308  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2309  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2310  
2311  	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2312  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2313  
2314  	set_thread(0);
2315  
2316  	ch = spdk_get_io_channel(bdev);
2317  	SPDK_CU_ASSERT_FATAL(ch != NULL);
2318  
2319  	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
2320  
2321  	bdev_io->u.bdev.iovs = NULL;
2322  
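	/* First submit a read with iovs set to NULL to cover the case where no data
	 * buffer has been provided yet.
	 */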
2323  	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2324  
2325  	ut_bdev_io_set_buf(bdev_io);
2326  
2327  	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2328  	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2329  	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
2330  	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);
2331  
2332  	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);
2333  
2334  	ut_test_submit_fused_nvme_cmd(ch, bdev_io);
2335  
2336  	/* Verify that the ext NVMe API is called if bdev_io ext_opts is set. */
2337  	bdev_io->u.bdev.ext_opts = &ext_io_opts;
2338  	g_ut_readv_ext_called = false;
2339  	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
2340  	CU_ASSERT(g_ut_readv_ext_called == true);
2341  	g_ut_readv_ext_called = false;
2342  
2343  	g_ut_writev_ext_called = false;
2344  	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
2345  	CU_ASSERT(g_ut_writev_ext_called == true);
2346  	g_ut_writev_ext_called = false;
2347  	bdev_io->u.bdev.ext_opts = NULL;
2348  
2349  	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
2350  
2351  	free(bdev_io);
2352  
2353  	spdk_put_io_channel(ch);
2354  
2355  	poll_threads();
2356  
2357  	set_thread(1);
2358  
2359  	rc = bdev_nvme_delete("nvme0", &g_any_path);
2360  	CU_ASSERT(rc == 0);
2361  
2362  	poll_threads();
2363  	spdk_delay_us(1000);
2364  	poll_threads();
2365  
2366  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2367  }
2368  
2369  static void
2370  test_add_remove_trid(void)
2371  {
2372  	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
2373  	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
2374  	struct nvme_ctrlr *nvme_ctrlr = NULL;
2375  	const int STRING_SIZE = 32;
2376  	const char *attached_names[STRING_SIZE];
2377  	struct nvme_path_id *ctrid;
2378  	int rc;
2379  
2380  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2381  	ut_init_trid(&path1.trid);
2382  	ut_init_trid2(&path2.trid);
2383  	ut_init_trid3(&path3.trid);
2384  
2385  	set_thread(0);
2386  
2387  	g_ut_attach_ctrlr_status = 0;
2388  	g_ut_attach_bdev_count = 0;
2389  
2390  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2391  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2392  
2393  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2394  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2395  	CU_ASSERT(rc == 0);
2396  
2397  	spdk_delay_us(1000);
2398  	poll_threads();
2399  
2400  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2401  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2402  
2403  	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2404  
2405  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2406  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2407  
2408  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2409  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2410  	CU_ASSERT(rc == 0);
2411  
2412  	spdk_delay_us(1000);
2413  	poll_threads();
2414  
2415  	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2416  	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2417  		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2418  			break;
2419  		}
2420  	}
2421  	CU_ASSERT(ctrid != NULL);
2422  
2423  	/* trid3 is not in the registered list. */
2424  	rc = bdev_nvme_delete("nvme0", &path3);
2425  	CU_ASSERT(rc == -ENXIO);
2426  
2427  	/* trid2 is not in use, and is simply removed. */
2428  	rc = bdev_nvme_delete("nvme0", &path2);
2429  	CU_ASSERT(rc == 0);
2430  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2431  	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2432  		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2433  	}
2434  
2435  	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2436  	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2437  
2438  	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2439  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2440  	CU_ASSERT(rc == 0);
2441  
2442  	spdk_delay_us(1000);
2443  	poll_threads();
2444  
2445  	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2446  	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2447  		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2448  			break;
2449  		}
2450  	}
2451  	CU_ASSERT(ctrid != NULL);
2452  
2453  	/* path1 is currently in use and path3 is an alternative path.
2454  	 * If we remove path1, the active path changes to path3.
2455  	 */
2456  	rc = bdev_nvme_delete("nvme0", &path1);
2457  	CU_ASSERT(rc == 0);
2458  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2459  	CU_ASSERT(nvme_ctrlr->resetting == true);
2460  	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2461  		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2462  	}
2463  	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2464  
2465  	poll_threads();
2466  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2467  	poll_threads();
2468  
2469  	CU_ASSERT(nvme_ctrlr->resetting == false);
2470  
2471  	/* path3 is the current and only path. If we remove path3, the corresponding
2472  	 * nvme_ctrlr is removed.
2473  	 */
2474  	rc = bdev_nvme_delete("nvme0", &path3);
2475  	CU_ASSERT(rc == 0);
2476  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2477  
2478  	poll_threads();
2479  	spdk_delay_us(1000);
2480  	poll_threads();
2481  
2482  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2483  
2484  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2485  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2486  
2487  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2488  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2489  	CU_ASSERT(rc == 0);
2490  
2491  	spdk_delay_us(1000);
2492  	poll_threads();
2493  
2494  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2495  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2496  
2497  	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2498  
2499  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2500  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2501  
2502  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2503  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2504  	CU_ASSERT(rc == 0);
2505  
2506  	spdk_delay_us(1000);
2507  	poll_threads();
2508  
2509  	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2510  	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2511  		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2512  			break;
2513  		}
2514  	}
2515  	CU_ASSERT(ctrid != NULL);
2516  
2517  	/* If no trid is specified, the nvme_ctrlr itself is removed. */
2518  	rc = bdev_nvme_delete("nvme0", &g_any_path);
2519  	CU_ASSERT(rc == 0);
2520  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2521  
2522  	poll_threads();
2523  	spdk_delay_us(1000);
2524  	poll_threads();
2525  
2526  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2527  }
2528  
2529  static void
2530  test_abort(void)
2531  {
2532  	struct spdk_nvme_transport_id trid = {};
2533  	struct nvme_ctrlr_opts opts = {};
2534  	struct spdk_nvme_ctrlr *ctrlr;
2535  	struct nvme_ctrlr *nvme_ctrlr;
2536  	const int STRING_SIZE = 32;
2537  	const char *attached_names[STRING_SIZE];
2538  	struct nvme_bdev *bdev;
2539  	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2540  	struct spdk_io_channel *ch1, *ch2;
2541  	struct nvme_bdev_channel *nbdev_ch1;
2542  	struct nvme_io_path *io_path1;
2543  	struct nvme_qpair *nvme_qpair1;
2544  	int rc;
2545  
2546  	/* Create the ctrlr on thread 1 and submit I/O and admin requests to be aborted on
2547  	 * thread 0. Abort requests for I/O are submitted on thread 0, and abort requests
2548  	 * for admin commands are submitted on thread 1. Both should succeed.
2549  	 */
2550  
2551  	ut_init_trid(&trid);
2552  
2553  	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
2554  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2555  
2556  	g_ut_attach_ctrlr_status = 0;
2557  	g_ut_attach_bdev_count = 1;
2558  
2559  	set_thread(1);
2560  
2561  	opts.ctrlr_loss_timeout_sec = -1;
2562  	opts.reconnect_delay_sec = 1;
2563  
2564  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2565  			      attach_ctrlr_done, NULL, NULL, &opts, false);
2566  	CU_ASSERT(rc == 0);
2567  
2568  	spdk_delay_us(1000);
2569  	poll_threads();
2570  
2571  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2572  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2573  
2574  	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
2575  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2576  
2577  	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
2578  	ut_bdev_io_set_buf(write_io);
2579  
2580  	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
2581  	ut_bdev_io_set_buf(fuse_io);
2582  
2583  	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
2584  	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
2585  
2586  	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);
2587  
2588  	set_thread(0);
2589  
2590  	ch1 = spdk_get_io_channel(bdev);
2591  	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
2592  	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
2593  	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2594  	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
2595  	nvme_qpair1 = io_path1->qpair;
2596  	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
2597  
2598  	set_thread(1);
2599  
2600  	ch2 = spdk_get_io_channel(bdev);
2601  	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
2602  
2603  	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2604  	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2605  	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2606  
2607  	/* Aborting the already completed request should fail. */
2608  	write_io->internal.in_submit_request = true;
2609  	bdev_nvme_submit_request(ch1, write_io);
2610  	poll_threads();
2611  
2612  	CU_ASSERT(write_io->internal.in_submit_request == false);
2613  
2614  	abort_io->u.abort.bio_to_abort = write_io;
2615  	abort_io->internal.in_submit_request = true;
2616  
2617  	bdev_nvme_submit_request(ch1, abort_io);
2618  
2619  	poll_threads();
2620  
2621  	CU_ASSERT(abort_io->internal.in_submit_request == false);
2622  	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2623  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2624  
2625  	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2626  	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2627  
2628  	admin_io->internal.in_submit_request = true;
2629  	bdev_nvme_submit_request(ch1, admin_io);
2630  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2631  	poll_threads();
2632  
2633  	CU_ASSERT(admin_io->internal.in_submit_request == false);
2634  
2635  	abort_io->u.abort.bio_to_abort = admin_io;
2636  	abort_io->internal.in_submit_request = true;
2637  
2638  	bdev_nvme_submit_request(ch2, abort_io);
2639  
2640  	poll_threads();
2641  
2642  	CU_ASSERT(abort_io->internal.in_submit_request == false);
2643  	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
2644  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2645  
2646  	/* Aborting the write request should succeed. */
2647  	write_io->internal.in_submit_request = true;
2648  	bdev_nvme_submit_request(ch1, write_io);
2649  
2650  	CU_ASSERT(write_io->internal.in_submit_request == true);
2651  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
2652  
2653  	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2654  	abort_io->u.abort.bio_to_abort = write_io;
2655  	abort_io->internal.in_submit_request = true;
2656  
2657  	bdev_nvme_submit_request(ch1, abort_io);
2658  
2659  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2660  	poll_threads();
2661  
2662  	CU_ASSERT(abort_io->internal.in_submit_request == false);
2663  	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2664  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2665  	CU_ASSERT(write_io->internal.in_submit_request == false);
2666  	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2667  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2668  
2669  	/* Aborting the fuse request should succeed. */
2670  	fuse_io->internal.in_submit_request = true;
2671  	bdev_nvme_submit_request(ch1, fuse_io);
2672  
2673  	CU_ASSERT(fuse_io->internal.in_submit_request == true);
2674  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);
2675  
2676  	abort_io->u.abort.bio_to_abort = fuse_io;
2677  	abort_io->internal.in_submit_request = true;
2678  
2679  	bdev_nvme_submit_request(ch1, abort_io);
2680  
2681  	spdk_delay_us(10000);
2682  	poll_threads();
2683  
2684  	CU_ASSERT(abort_io->internal.in_submit_request == false);
2685  	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2686  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2687  	CU_ASSERT(fuse_io->internal.in_submit_request == false);
2688  	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2689  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
2690  
2691  	/* Aborting the admin request should succeed. */
2692  	admin_io->internal.in_submit_request = true;
2693  	bdev_nvme_submit_request(ch1, admin_io);
2694  
2695  	CU_ASSERT(admin_io->internal.in_submit_request == true);
2696  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
2697  
2698  	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
2699  	abort_io->u.abort.bio_to_abort = admin_io;
2700  	abort_io->internal.in_submit_request = true;
2701  
2702  	bdev_nvme_submit_request(ch2, abort_io);
2703  
2704  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2705  	poll_threads();
2706  
2707  	CU_ASSERT(abort_io->internal.in_submit_request == false);
2708  	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2709  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2710  	CU_ASSERT(admin_io->internal.in_submit_request == false);
2711  	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2712  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2713  
2714  	set_thread(0);
2715  
2716  	/* If a qpair is disconnected, it is freed and then reconnected by resetting
2717  	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is resetting
2718  	 * should be queued.
2719  	 */
2720  	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2721  
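	/* Polling detects the disconnected qpair, frees it, and starts resetting the ctrlr. */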
2722  	poll_thread_times(0, 3);
2723  
2724  	CU_ASSERT(nvme_qpair1->qpair == NULL);
2725  	CU_ASSERT(nvme_ctrlr->resetting == true);
2726  
2727  	write_io->internal.in_submit_request = true;
2728  
2729  	bdev_nvme_submit_request(ch1, write_io);
2730  
2731  	CU_ASSERT(write_io->internal.in_submit_request == true);
2732  	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
2733  
2734  	/* Aborting the queued write request should succeed immediately. */
2735  	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2736  	abort_io->u.abort.bio_to_abort = write_io;
2737  	abort_io->internal.in_submit_request = true;
2738  
2739  	bdev_nvme_submit_request(ch1, abort_io);
2740  
2741  	CU_ASSERT(abort_io->internal.in_submit_request == false);
2742  	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2743  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2744  	CU_ASSERT(write_io->internal.in_submit_request == false);
2745  	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2746  
2747  	poll_threads();
2748  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2749  	poll_threads();
2750  
2751  	spdk_put_io_channel(ch1);
2752  
2753  	set_thread(1);
2754  
2755  	spdk_put_io_channel(ch2);
2756  
2757  	poll_threads();
2758  
2759  	free(write_io);
2760  	free(fuse_io);
2761  	free(admin_io);
2762  	free(abort_io);
2763  
2764  	set_thread(1);
2765  
2766  	rc = bdev_nvme_delete("nvme0", &g_any_path);
2767  	CU_ASSERT(rc == 0);
2768  
2769  	poll_threads();
2770  	spdk_delay_us(1000);
2771  	poll_threads();
2772  
2773  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2774  }
2775  
2776  static void
2777  test_get_io_qpair(void)
2778  {
2779  	struct spdk_nvme_transport_id trid = {};
2780  	struct spdk_nvme_ctrlr ctrlr = {};
2781  	struct nvme_ctrlr *nvme_ctrlr = NULL;
2782  	struct spdk_io_channel *ch;
2783  	struct nvme_ctrlr_channel *ctrlr_ch;
2784  	struct spdk_nvme_qpair *qpair;
2785  	int rc;
2786  
2787  	ut_init_trid(&trid);
2788  	TAILQ_INIT(&ctrlr.active_io_qpairs);
2789  
2790  	set_thread(0);
2791  
2792  	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2793  	CU_ASSERT(rc == 0);
2794  
2795  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2796  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2797  
2798  	ch = spdk_get_io_channel(nvme_ctrlr);
2799  	SPDK_CU_ASSERT_FATAL(ch != NULL);
2800  	ctrlr_ch = spdk_io_channel_get_ctx(ch);
2801  	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2802  
2803  	qpair = bdev_nvme_get_io_qpair(ch);
2804  	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2805  
2806  	spdk_put_io_channel(ch);
2807  
2808  	rc = bdev_nvme_delete("nvme0", &g_any_path);
2809  	CU_ASSERT(rc == 0);
2810  
2811  	poll_threads();
2812  	spdk_delay_us(1000);
2813  	poll_threads();
2814  
2815  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2816  }
2817  
2818  /* Test a scenario in which the bdev subsystem starts shutting down while NVMe bdevs
2819   * still exist. In this scenario, spdk_bdev_unregister() is called first. Add a test
2820   * case to avoid regression for this scenario. spdk_bdev_unregister() eventually calls
2821   * bdev_nvme_destruct(), so call bdev_nvme_destruct() directly here.
2822   */
2823  static void
2824  test_bdev_unregister(void)
2825  {
2826  	struct spdk_nvme_transport_id trid = {};
2827  	struct spdk_nvme_ctrlr *ctrlr;
2828  	struct nvme_ctrlr *nvme_ctrlr;
2829  	struct nvme_ns *nvme_ns1, *nvme_ns2;
2830  	const int STRING_SIZE = 32;
2831  	const char *attached_names[STRING_SIZE];
2832  	struct nvme_bdev *bdev1, *bdev2;
2833  	int rc;
2834  
2835  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2836  	ut_init_trid(&trid);
2837  
2838  	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
2839  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2840  
2841  	g_ut_attach_ctrlr_status = 0;
2842  	g_ut_attach_bdev_count = 2;
2843  
2844  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2845  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2846  	CU_ASSERT(rc == 0);
2847  
2848  	spdk_delay_us(1000);
2849  	poll_threads();
2850  
2851  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2852  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2853  
2854  	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
2855  	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
2856  
2857  	bdev1 = nvme_ns1->bdev;
2858  	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
2859  
2860  	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2861  	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
2862  
2863  	bdev2 = nvme_ns2->bdev;
2864  	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
2865  
2866  	bdev_nvme_destruct(&bdev1->disk);
2867  	bdev_nvme_destruct(&bdev2->disk);
2868  
2869  	poll_threads();
2870  
2871  	CU_ASSERT(nvme_ns1->bdev == NULL);
2872  	CU_ASSERT(nvme_ns2->bdev == NULL);
2873  
2874  	nvme_ctrlr->destruct = true;
2875  	_nvme_ctrlr_destruct(nvme_ctrlr);
2876  
2877  	poll_threads();
2878  	spdk_delay_us(1000);
2879  	poll_threads();
2880  
2881  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2882  }
2883  
2884  static void
2885  test_compare_ns(void)
2886  {
2887  	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
2888  	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
2889  	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
2890  	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
2891  	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };
2892  
2893  	/* No IDs are defined. */
2894  	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2895  
2896  	/* Only EUI64s are defined and they do not match. */
2897  	nsdata1.eui64 = 0xABCDEF0123456789;
2898  	nsdata2.eui64 = 0xBBCDEF0123456789;
2899  	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2900  
2901  	/* Only EUI64s are defined and they match. */
2902  	nsdata2.eui64 = 0xABCDEF0123456789;
2903  	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2904  
2905  	/* Only NGUIDs are defined and they do not match. */
2906  	nsdata1.eui64 = 0x0;
2907  	nsdata2.eui64 = 0x0;
2908  	nsdata1.nguid[0] = 0x12;
2909  	nsdata2.nguid[0] = 0x10;
2910  	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2911  
2912  	/* Only NGUIDs are defined and they match. */
2913  	nsdata2.nguid[0] = 0x12;
2914  	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2915  
2916  	/* Only UUIDs are defined and they do not match. */
2917  	nsdata1.nguid[0] = 0x0;
2918  	nsdata2.nguid[0] = 0x0;
2919  	ns1.uuid = &uuid1;
2920  	ns2.uuid = &uuid2;
2921  	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2922  
2923  	/* Only one UUID is defined. */
2924  	ns1.uuid = NULL;
2925  	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2926  
2927  	/* Only UUIDs are defined and they match. */
2928  	ns1.uuid = &uuid2;
2929  	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2930  
2931  	/* EUI64, NGUID, and UUID are all defined and match. */
2932  	nsdata1.eui64 = 0x123456789ABCDEF;
2933  	nsdata2.eui64 = 0x123456789ABCDEF;
2934  	nsdata1.nguid[15] = 0x34;
2935  	nsdata2.nguid[15] = 0x34;
2936  	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
2937  
2938  	/* The CSIs do not match. */
2939  	ns1.csi = SPDK_NVME_CSI_ZNS;
2940  	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
2941  }
2942  
2943  static void
2944  test_init_ana_log_page(void)
2945  {
2946  	struct spdk_nvme_transport_id trid = {};
2947  	struct spdk_nvme_ctrlr *ctrlr;
2948  	struct nvme_ctrlr *nvme_ctrlr;
2949  	const int STRING_SIZE = 32;
2950  	const char *attached_names[STRING_SIZE];
2951  	int rc;
2952  
2953  	set_thread(0);
2954  
2955  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
2956  	ut_init_trid(&trid);
2957  
2958  	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
2959  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2960  
2961  	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2962  	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
2963  	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
2964  	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
2965  	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;
2966  
2967  	g_ut_attach_ctrlr_status = 0;
2968  	g_ut_attach_bdev_count = 5;
2969  
2970  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
2971  			      attach_ctrlr_done, NULL, NULL, NULL, false);
2972  	CU_ASSERT(rc == 0);
2973  
2974  	spdk_delay_us(1000);
2975  	poll_threads();
2976  
2977  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2978  	poll_threads();
2979  
2980  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2981  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2982  
2983  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
2984  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
2985  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
2986  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
2987  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
2988  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
2989  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
2990  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
2991  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
2992  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
2993  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
2994  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
2995  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
2996  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
2997  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);
2998  
2999  	rc = bdev_nvme_delete("nvme0", &g_any_path);
3000  	CU_ASSERT(rc == 0);
3001  
3002  	poll_threads();
3003  	spdk_delay_us(1000);
3004  	poll_threads();
3005  
3006  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3007  }
3008  
3009  static void
3010  init_accel(void)
3011  {
3012  	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
3013  				sizeof(int), "accel_p");
3014  }
3015  
3016  static void
3017  fini_accel(void)
3018  {
3019  	spdk_io_device_unregister(g_accel_p, NULL);
3020  }
3021  
3022  static void
3023  test_get_memory_domains(void)
3024  {
3025  	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
3026  	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
3027  	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
3028  	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3029  	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3030  	struct spdk_memory_domain *domains[4] = {};
3031  	int rc = 0;
3032  
3033  	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3034  
3035  	/* nvme controller doesn't have memory domains */
3036  	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3037  	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3038  	CU_ASSERT(rc == 0);
3039  	CU_ASSERT(domains[0] == NULL);
3040  	CU_ASSERT(domains[1] == NULL);
3041  
3042  	/* nvme controller has a memory domain */
3043  	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3044  	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3045  	CU_ASSERT(rc == 1);
3046  	CU_ASSERT(domains[0] != NULL);
3047  	memset(domains, 0, sizeof(domains));
3048  
3049  	/* multipath, 2 controllers report 1 memory domain each */
3050  	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3051  
3052  	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3053  	CU_ASSERT(rc == 2);
3054  	CU_ASSERT(domains[0] != NULL);
3055  	CU_ASSERT(domains[1] != NULL);
3056  	memset(domains, 0, sizeof(domains));
3057  
3058  	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3059  	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3060  	CU_ASSERT(rc == 2);
3061  
3062  	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3063  	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3064  	CU_ASSERT(rc == 2);
3065  	CU_ASSERT(domains[0] == NULL);
3066  	CU_ASSERT(domains[1] == NULL);
3067  
3068  	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3069  	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3070  	CU_ASSERT(rc == 2);
3071  	CU_ASSERT(domains[0] != NULL);
3072  	CU_ASSERT(domains[1] == NULL);
3073  	memset(domains, 0, sizeof(domains));
3074  
3075  	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3076  	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3077  	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3078  	CU_ASSERT(rc == 4);
3079  	CU_ASSERT(domains[0] != NULL);
3080  	CU_ASSERT(domains[1] != NULL);
3081  	CU_ASSERT(domains[2] != NULL);
3082  	CU_ASSERT(domains[3] != NULL);
3083  	memset(domains, 0, sizeof(domains));
3084  
3085  	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test).
3086  	 * Array size is less than the number of memory domains. */
3087  	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3088  	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3089  	CU_ASSERT(rc == 4);
3090  	CU_ASSERT(domains[0] != NULL);
3091  	CU_ASSERT(domains[1] != NULL);
3092  	CU_ASSERT(domains[2] != NULL);
3093  	CU_ASSERT(domains[3] == NULL);
3094  	memset(domains, 0, sizeof(domains));
3095  
3096  	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3097  }
3098  
3099  static void
3100  test_reconnect_qpair(void)
3101  {
3102  	struct spdk_nvme_transport_id trid = {};
3103  	struct spdk_nvme_ctrlr *ctrlr;
3104  	struct nvme_ctrlr *nvme_ctrlr;
3105  	const int STRING_SIZE = 32;
3106  	const char *attached_names[STRING_SIZE];
3107  	struct nvme_bdev *bdev;
3108  	struct spdk_io_channel *ch1, *ch2;
3109  	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3110  	struct nvme_io_path *io_path1, *io_path2;
3111  	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3112  	int rc;
3113  
3114  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3115  	ut_init_trid(&trid);
3116  
3117  	set_thread(0);
3118  
3119  	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
3120  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
3121  
3122  	g_ut_attach_ctrlr_status = 0;
3123  	g_ut_attach_bdev_count = 1;
3124  
3125  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
3126  			      attach_ctrlr_done, NULL, NULL, NULL, false);
3127  	CU_ASSERT(rc == 0);
3128  
3129  	spdk_delay_us(1000);
3130  	poll_threads();
3131  
3132  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
3133  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
3134  
3135  	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
3136  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3137  
3138  	ch1 = spdk_get_io_channel(bdev);
3139  	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3140  
3141  	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3142  	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
3143  	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3144  	nvme_qpair1 = io_path1->qpair;
3145  	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
3146  
3147  	set_thread(1);
3148  
3149  	ch2 = spdk_get_io_channel(bdev);
3150  	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3151  
3152  	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3153  	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
3154  	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3155  	nvme_qpair2 = io_path2->qpair;
3156  	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
3157  
3158  	/* If a qpair is disconnected, it is freed and then reconnected via
3159  	 * resetting the corresponding nvme_ctrlr.
3160  	 */
3161  	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3162  	ctrlr->is_failed = true;
3163  
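	/* Thread 1 detects its disconnected qpair first, frees it, and starts a ctrlr reset. */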
3164  	poll_thread_times(1, 3);
3165  	CU_ASSERT(nvme_qpair1->qpair != NULL);
3166  	CU_ASSERT(nvme_qpair2->qpair == NULL);
3167  	CU_ASSERT(nvme_ctrlr->resetting == true);
3168  
3169  	poll_thread_times(0, 3);
3170  	CU_ASSERT(nvme_qpair1->qpair == NULL);
3171  	CU_ASSERT(nvme_qpair2->qpair == NULL);
3172  	CU_ASSERT(ctrlr->is_failed == true);
3173  
3174  	poll_thread_times(1, 2);
3175  	poll_thread_times(0, 1);
3176  	CU_ASSERT(ctrlr->is_failed == false);
3177  	CU_ASSERT(ctrlr->adminq.is_connected == false);
3178  
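	/* After the admin poll period elapses, the admin qpair is reconnected. */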
3179  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3180  	poll_thread_times(0, 2);
3181  	CU_ASSERT(ctrlr->adminq.is_connected == true);
3182  
3183  	poll_thread_times(0, 1);
3184  	poll_thread_times(1, 1);
3185  	CU_ASSERT(nvme_qpair1->qpair != NULL);
3186  	CU_ASSERT(nvme_qpair2->qpair != NULL);
3187  	CU_ASSERT(nvme_ctrlr->resetting == true);
3188  
3189  	poll_thread_times(0, 2);
3190  	poll_thread_times(1, 1);
3191  	poll_thread_times(0, 1);
3192  	CU_ASSERT(nvme_ctrlr->resetting == false);
3193  
3194  	poll_threads();
3195  
3196  	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
3197  	 * fails, the qpair is just freed.
3198  	 */
3199  	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
3200  	ctrlr->is_failed = true;
3201  	ctrlr->fail_reset = true;
3202  
3203  	poll_thread_times(1, 3);
3204  	CU_ASSERT(nvme_qpair1->qpair != NULL);
3205  	CU_ASSERT(nvme_qpair2->qpair == NULL);
3206  	CU_ASSERT(nvme_ctrlr->resetting == true);
3207  
3208  	poll_thread_times(0, 3);
3209  	poll_thread_times(1, 1);
3210  	CU_ASSERT(nvme_qpair1->qpair == NULL);
3211  	CU_ASSERT(nvme_qpair2->qpair == NULL);
3212  	CU_ASSERT(ctrlr->is_failed == true);
3213  
3214  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3215  	poll_thread_times(0, 3);
3216  	poll_thread_times(1, 1);
3217  	poll_thread_times(0, 1);
3218  	CU_ASSERT(ctrlr->is_failed == true);
3219  	CU_ASSERT(nvme_ctrlr->resetting == false);
3220  	CU_ASSERT(nvme_qpair1->qpair == NULL);
3221  	CU_ASSERT(nvme_qpair2->qpair == NULL);
3222  
3223  	poll_threads();
3224  
3225  	spdk_put_io_channel(ch2);
3226  
3227  	set_thread(0);
3228  
3229  	spdk_put_io_channel(ch1);
3230  
3231  	poll_threads();
3232  
3233  	rc = bdev_nvme_delete("nvme0", &g_any_path);
3234  	CU_ASSERT(rc == 0);
3235  
3236  	poll_threads();
3237  	spdk_delay_us(1000);
3238  	poll_threads();
3239  
3240  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
3241  }
3242  
3243  static void
3244  test_create_bdev_ctrlr(void)
3245  {
3246  	struct nvme_path_id path1 = {}, path2 = {};
3247  	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3248  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3249  	const int STRING_SIZE = 32;
3250  	const char *attached_names[STRING_SIZE];
3251  	int rc;
3252  
3253  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3254  	ut_init_trid(&path1.trid);
3255  	ut_init_trid2(&path2.trid);
3256  
3257  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3258  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3259  
3260  	g_ut_attach_ctrlr_status = 0;
3261  	g_ut_attach_bdev_count = 0;
3262  
3263  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3264  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3265  	CU_ASSERT(rc == 0);
3266  
3267  	spdk_delay_us(1000);
3268  	poll_threads();
3269  
3270  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3271  	poll_threads();
3272  
3273  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3274  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3275  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3276  
3277  	/* cntlid is duplicated, and adding the second ctrlr should fail. */
3278  	g_ut_attach_ctrlr_status = -EINVAL;
3279  
3280  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3281  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3282  
3283  	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
3284  
3285  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3286  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3287  	CU_ASSERT(rc == 0);
3288  
3289  	spdk_delay_us(1000);
3290  	poll_threads();
3291  
3292  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3293  	poll_threads();
3294  
3295  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3296  
3297  	/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
3298  	g_ut_attach_ctrlr_status = 0;
3299  
3300  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3301  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3302  
3303  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3304  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3305  	CU_ASSERT(rc == 0);
3306  
3307  	spdk_delay_us(1000);
3308  	poll_threads();
3309  
3310  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3311  	poll_threads();
3312  
3313  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3314  
3315  	/* Delete two ctrlrs at once. */
3316  	rc = bdev_nvme_delete("nvme0", &g_any_path);
3317  	CU_ASSERT(rc == 0);
3318  
3319  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3320  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3321  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3322  
3323  	poll_threads();
3324  	spdk_delay_us(1000);
3325  	poll_threads();
3326  
3327  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3328  
3329  	/* Add two ctrlrs and delete them one by one. */
3330  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
3331  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3332  
3333  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
3334  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3335  
3336  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3337  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3338  	CU_ASSERT(rc == 0);
3339  
3340  	spdk_delay_us(1000);
3341  	poll_threads();
3342  
3343  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3344  	poll_threads();
3345  
3346  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3347  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3348  	CU_ASSERT(rc == 0);
3349  
3350  	spdk_delay_us(1000);
3351  	poll_threads();
3352  
3353  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3354  	poll_threads();
3355  
3356  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3357  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3358  
3359  	rc = bdev_nvme_delete("nvme0", &path1);
3360  	CU_ASSERT(rc == 0);
3361  
3362  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3363  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL);
3364  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3365  
3366  	poll_threads();
3367  	spdk_delay_us(1000);
3368  	poll_threads();
3369  
3370  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3371  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3372  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3373  
3374  	rc = bdev_nvme_delete("nvme0", &path2);
3375  	CU_ASSERT(rc == 0);
3376  
3377  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3378  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3379  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL);
3380  
3381  	poll_threads();
3382  	spdk_delay_us(1000);
3383  	poll_threads();
3384  
3385  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3386  }
3387  
3388  static struct nvme_ns *
3389  _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
3390  {
3391  	struct nvme_ns *nvme_ns;
3392  
3393  	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
3394  		if (nvme_ns->ctrlr == nvme_ctrlr) {
3395  			return nvme_ns;
3396  		}
3397  	}
3398  
3399  	return NULL;
3400  }
3401  
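/* Verify that namespaces from two ctrlrs are aggregated into nvme_bdevs by
 * UUID: a namespace whose UUID matches an existing bdev is added to that bdev
 * (bumping bdev->ref), while a namespace with the same NSID but a different
 * UUID is rejected.
 */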
3402  static void
3403  test_add_multi_ns_to_bdev(void)
3404  {
3405  	struct nvme_path_id path1 = {}, path2 = {};
3406  	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3407  	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3408  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3409  	struct nvme_ns *nvme_ns1, *nvme_ns2;
3410  	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
3411  	const int STRING_SIZE = 32;
3412  	const char *attached_names[STRING_SIZE];
3413  	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3414  	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
3415  	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
3416  	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
3417  	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
3418  	int rc;
3419  
3420  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3421  	ut_init_trid(&path1.trid);
3422  	ut_init_trid2(&path2.trid);
3423  
3424  	/* Create nvme_bdevs, some of which have namespaces shared between the two ctrlrs. */
3425  
3426  	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose
3427  	 * 1st, 3rd, and 4th namespaces are populated.
3428  	 */
3429  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
3430  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3431  
3432  	ctrlr1->ns[1].is_active = false;
3433  	ctrlr1->ns[4].is_active = false;
3434  	ctrlr1->ns[0].uuid = &uuid1;
3435  	ctrlr1->ns[2].uuid = &uuid3;
3436  	ctrlr1->ns[3].uuid = &uuid4;
3437  
3438  	g_ut_attach_ctrlr_status = 0;
3439  	g_ut_attach_bdev_count = 3;
3440  
3441  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3442  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3443  	CU_ASSERT(rc == 0);
3444  
3445  	spdk_delay_us(1000);
3446  	poll_threads();
3447  
3448  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3449  	poll_threads();
3450  
3451  	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose
3452  	 * 1st, 2nd, and 4th namespaces are populated. The UUID of the 4th namespace
3453  	 * differs, so adding the 4th namespace to an existing bdev should fail.
3454  	 */
3455  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
3456  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3457  
3458  	ctrlr2->ns[2].is_active = false;
3459  	ctrlr2->ns[4].is_active = false;
3460  	ctrlr2->ns[0].uuid = &uuid1;
3461  	ctrlr2->ns[1].uuid = &uuid2;
3462  	ctrlr2->ns[3].uuid = &uuid44;
3463  
3464  	g_ut_attach_ctrlr_status = 0;
3465  	g_ut_attach_bdev_count = 2;
3466  
3467  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3468  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3469  	CU_ASSERT(rc == 0);
3470  
3471  	spdk_delay_us(1000);
3472  	poll_threads();
3473  
3474  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3475  	poll_threads();
3476  
3477  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3478  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3479  
3480  	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3481  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3482  
3483  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
3484  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
3485  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
3486  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
3487  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
3488  
3489  	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3490  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3491  
3492  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
3493  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
3494  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
3495  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
3496  	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
3497  
3498  	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3499  	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3500  	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
3501  	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
3502  	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
3503  	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
3504  	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
3505  	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
3506  	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
3507  
3508  	CU_ASSERT(bdev1->ref == 2);
3509  	CU_ASSERT(bdev2->ref == 1);
3510  	CU_ASSERT(bdev3->ref == 1);
3511  	CU_ASSERT(bdev4->ref == 1);
3512  
3513  	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
3514  	rc = bdev_nvme_delete("nvme0", &path1);
3515  	CU_ASSERT(rc == 0);
3516  
3517  	poll_threads();
3518  	spdk_delay_us(1000);
3519  	poll_threads();
3520  
3521  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
3522  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
3523  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
3524  
3525  	rc = bdev_nvme_delete("nvme0", &path2);
3526  	CU_ASSERT(rc == 0);
3527  
3528  	poll_threads();
3529  	spdk_delay_us(1000);
3530  	poll_threads();
3531  
3532  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3533  
3534  	/* Test if an nvme_bdev that has a namespace shared between two ctrlrs
3535  	 * can be deleted when the bdev subsystem shuts down.
3536  	 */
3537  	g_ut_attach_bdev_count = 1;
3538  
3539  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3540  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3541  
3542  	ctrlr1->ns[0].uuid = &uuid1;
3543  
3544  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3545  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3546  	CU_ASSERT(rc == 0);
3547  
3548  	spdk_delay_us(1000);
3549  	poll_threads();
3550  
3551  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3552  	poll_threads();
3553  
3554  	ut_init_trid2(&path2.trid);
3555  
3556  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3557  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3558  
3559  	ctrlr2->ns[0].uuid = &uuid1;
3560  
3561  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3562  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3563  	CU_ASSERT(rc == 0);
3564  
3565  	spdk_delay_us(1000);
3566  	poll_threads();
3567  
3568  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3569  	poll_threads();
3570  
3571  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3572  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3573  
3574  	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3575  	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
3576  
3577  	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3578  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3579  
3580  	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3581  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3582  
3583  	/* Check that the nvme_bdev has two nvme_ns. */
3584  	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
3585  	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3586  	CU_ASSERT(nvme_ns1->bdev == bdev1);
3587  
3588  	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
3589  	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3590  	CU_ASSERT(nvme_ns2->bdev == bdev1);
3591  
3592  	/* Delete the nvme_bdev first, as is done when the bdev subsystem shuts down. */
3593  	bdev_nvme_destruct(&bdev1->disk);
3594  
3595  	poll_threads();
3596  
3597  	CU_ASSERT(nvme_ns1->bdev == NULL);
3598  	CU_ASSERT(nvme_ns2->bdev == NULL);
3599  
3600  	nvme_ctrlr1->destruct = true;
3601  	_nvme_ctrlr_destruct(nvme_ctrlr1);
3602  
3603  	poll_threads();
3604  	spdk_delay_us(1000);
3605  	poll_threads();
3606  
3607  	nvme_ctrlr2->destruct = true;
3608  	_nvme_ctrlr_destruct(nvme_ctrlr2);
3609  
3610  	poll_threads();
3611  	spdk_delay_us(1000);
3612  	poll_threads();
3613  
3614  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3615  }
3616  
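/* Verify that I/O paths are added to and deleted from an existing
 * nvme_bdev_channel dynamically as ctrlrs are attached to and deleted from
 * the nvme_bdev_ctrlr.
 */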
3617  static void
3618  test_add_multi_io_paths_to_nbdev_ch(void)
3619  {
3620  	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
3621  	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
3622  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3623  	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
3624  	struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
3625  	const int STRING_SIZE = 32;
3626  	const char *attached_names[STRING_SIZE];
3627  	struct nvme_bdev *bdev;
3628  	struct spdk_io_channel *ch;
3629  	struct nvme_bdev_channel *nbdev_ch;
3630  	struct nvme_io_path *io_path1, *io_path2, *io_path3;
3631  	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3632  	int rc;
3633  
3634  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3635  	ut_init_trid(&path1.trid);
3636  	ut_init_trid2(&path2.trid);
3637  	ut_init_trid3(&path3.trid);
3638  	g_ut_attach_ctrlr_status = 0;
3639  	g_ut_attach_bdev_count = 1;
3640  
3641  	set_thread(1);
3642  
3643  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3644  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3645  
3646  	ctrlr1->ns[0].uuid = &uuid1;
3647  
3648  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3649  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3650  	CU_ASSERT(rc == 0);
3651  
3652  	spdk_delay_us(1000);
3653  	poll_threads();
3654  
3655  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3656  	poll_threads();
3657  
3658  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3659  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3660  
3661  	ctrlr2->ns[0].uuid = &uuid1;
3662  
3663  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3664  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3665  	CU_ASSERT(rc == 0);
3666  
3667  	spdk_delay_us(1000);
3668  	poll_threads();
3669  
3670  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3671  	poll_threads();
3672  
3673  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3674  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3675  
3676  	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3677  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3678  
3679  	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3680  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3681  
3682  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3683  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3684  
3685  	nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
3686  	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
3687  
3688  	nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
3689  	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
3690  
3691  	set_thread(0);
3692  
3693  	ch = spdk_get_io_channel(bdev);
3694  	SPDK_CU_ASSERT_FATAL(ch != NULL);
3695  	nbdev_ch = spdk_io_channel_get_ctx(ch);
3696  
3697  	io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
3698  	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
3699  
3700  	io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
3701  	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
3702  
3703  	set_thread(1);
3704  
3705  	/* Check that an I/O path is dynamically added to the nvme_bdev_channel. */
3706  	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
3707  	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
3708  
3709  	ctrlr3->ns[0].uuid = &uuid1;
3710  
3711  	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
3712  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3713  	CU_ASSERT(rc == 0);
3714  
3715  	spdk_delay_us(1000);
3716  	poll_threads();
3717  
3718  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3719  	poll_threads();
3720  
3721  	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
3722  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
3723  
3724  	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
3725  	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
3726  
3727  	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
3728  	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
3729  
3730  	/* Check that the I/O path is dynamically deleted from the nvme_bdev_channel. */
3731  	rc = bdev_nvme_delete("nvme0", &path2);
3732  	CU_ASSERT(rc == 0);
3733  
3734  	poll_threads();
3735  	spdk_delay_us(1000);
3736  	poll_threads();
3737  
3738  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
3739  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
3740  	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
3741  
3742  	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
3743  	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
3744  	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
3745  
3746  	set_thread(0);
3747  
3748  	spdk_put_io_channel(ch);
3749  
3750  	poll_threads();
3751  
3752  	set_thread(1);
3753  
3754  	rc = bdev_nvme_delete("nvme0", &g_any_path);
3755  	CU_ASSERT(rc == 0);
3756  
3757  	poll_threads();
3758  	spdk_delay_us(1000);
3759  	poll_threads();
3760  
3761  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3762  }
3763  
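/* Verify that an admin passthrough command is submitted to a ctrlr that is
 * not failed, and fails only when every ctrlr is failed.
 */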
3764  static void
3765  test_admin_path(void)
3766  {
3767  	struct nvme_path_id path1 = {}, path2 = {};
3768  	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3769  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3770  	const int STRING_SIZE = 32;
3771  	const char *attached_names[STRING_SIZE];
3772  	struct nvme_bdev *bdev;
3773  	struct spdk_io_channel *ch;
3774  	struct spdk_bdev_io *bdev_io;
3775  	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
3776  	int rc;
3777  
3778  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3779  	ut_init_trid(&path1.trid);
3780  	ut_init_trid2(&path2.trid);
3781  	g_ut_attach_ctrlr_status = 0;
3782  	g_ut_attach_bdev_count = 1;
3783  
3784  	set_thread(0);
3785  
3786  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3787  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3788  
3789  	ctrlr1->ns[0].uuid = &uuid1;
3790  
3791  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3792  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3793  	CU_ASSERT(rc == 0);
3794  
3795  	spdk_delay_us(1000);
3796  	poll_threads();
3797  
3798  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3799  	poll_threads();
3800  
3801  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3802  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3803  
3804  	ctrlr2->ns[0].uuid = &uuid1;
3805  
3806  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3807  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3808  	CU_ASSERT(rc == 0);
3809  
3810  	spdk_delay_us(1000);
3811  	poll_threads();
3812  
3813  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3814  	poll_threads();
3815  
3816  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3817  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3818  
3819  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3820  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3821  
3822  	ch = spdk_get_io_channel(bdev);
3823  	SPDK_CU_ASSERT_FATAL(ch != NULL);
3824  
3825  	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
3826  	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
3827  
3828  	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
3829  	 * submitted to ctrlr2.
3830  	 */
3831  	ctrlr1->is_failed = true;
3832  	bdev_io->internal.in_submit_request = true;
3833  
3834  	bdev_nvme_submit_request(ch, bdev_io);
3835  
3836  	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3837  	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
3838  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
3839  
3840  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3841  	poll_threads();
3842  
3843  	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3844  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3845  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3846  
3847  	/* Both ctrlr1 and ctrlr2 are failed. Submission of the admin command should fail. */
3848  	ctrlr2->is_failed = true;
3849  	bdev_io->internal.in_submit_request = true;
3850  
3851  	bdev_nvme_submit_request(ch, bdev_io);
3852  
3853  	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
3854  	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
3855  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
3856  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
3857  
3858  	free(bdev_io);
3859  
3860  	spdk_put_io_channel(ch);
3861  
3862  	poll_threads();
3863  
3864  	rc = bdev_nvme_delete("nvme0", &g_any_path);
3865  	CU_ASSERT(rc == 0);
3866  
3867  	poll_threads();
3868  	spdk_delay_us(1000);
3869  	poll_threads();
3870  
3871  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
3872  }
3873  
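/* Return the I/O path in the given nvme_bdev_channel whose qpair belongs to
 * the given nvme_ctrlr, or NULL if there is none.
 */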
3874  static struct nvme_io_path *
3875  ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
3876  			struct nvme_ctrlr *nvme_ctrlr)
3877  {
3878  	struct nvme_io_path *io_path;
3879  
3880  	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
3881  		if (io_path->qpair->ctrlr == nvme_ctrlr) {
3882  			return io_path;
3883  		}
3884  	}
3885  
3886  	return NULL;
3887  }
3888  
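/* Verify that resetting an nvme_bdev resets its ctrlrs one by one, and that
 * a second reset request submitted while the first is in progress is parked
 * on the pending_resets queue and completes after the first.
 */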
3889  static void
3890  test_reset_bdev_ctrlr(void)
3891  {
3892  	struct nvme_path_id path1 = {}, path2 = {};
3893  	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
3894  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
3895  	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
3896  	struct nvme_path_id *curr_path1, *curr_path2;
3897  	const int STRING_SIZE = 32;
3898  	const char *attached_names[STRING_SIZE];
3899  	struct nvme_bdev *bdev;
3900  	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
3901  	struct nvme_bdev_io *first_bio;
3902  	struct spdk_io_channel *ch1, *ch2;
3903  	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3904  	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
3905  	int rc;
3906  
3907  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3908  	ut_init_trid(&path1.trid);
3909  	ut_init_trid2(&path2.trid);
3910  	g_ut_attach_ctrlr_status = 0;
3911  	g_ut_attach_bdev_count = 1;
3912  
3913  	set_thread(0);
3914  
3915  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
3916  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
3917  
3918  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
3919  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3920  	CU_ASSERT(rc == 0);
3921  
3922  	spdk_delay_us(1000);
3923  	poll_threads();
3924  
3925  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3926  	poll_threads();
3927  
3928  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
3929  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
3930  
3931  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
3932  			      attach_ctrlr_done, NULL, NULL, NULL, true);
3933  	CU_ASSERT(rc == 0);
3934  
3935  	spdk_delay_us(1000);
3936  	poll_threads();
3937  
3938  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
3939  	poll_threads();
3940  
3941  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
3942  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
3943  
3944  	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
3945  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
3946  
3947  	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
3948  	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);
3949  
3950  	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
3951  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
3952  
3953  	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
3954  	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);
3955  
3956  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
3957  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
3958  
3959  	set_thread(0);
3960  
3961  	ch1 = spdk_get_io_channel(bdev);
3962  	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
3963  
3964  	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
3965  	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
3966  	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
3967  	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
3968  	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);
3969  
3970  	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
3971  	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;
3972  
3973  	set_thread(1);
3974  
3975  	ch2 = spdk_get_io_channel(bdev);
3976  	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
3977  
3978  	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
3979  	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
3980  	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
3981  	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
3982  	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);
3983  
3984  	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
3985  
3986  	/* The first reset request from bdev_io is submitted on thread 0.
3987  	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
3988  	 *
3989  	 * A few extra polls are necessary after resetting ctrlr1 to check
3990  	 * pending reset requests for ctrlr1.
3991  	 */
3992  	ctrlr1->is_failed = true;
3993  	curr_path1->is_failed = true;
3994  	ctrlr2->is_failed = true;
3995  	curr_path2->is_failed = true;
3996  
3997  	set_thread(0);
3998  
3999  	bdev_nvme_submit_request(ch1, first_bdev_io);
4000  	CU_ASSERT(first_bio->io_path == io_path11);
4001  	CU_ASSERT(nvme_ctrlr1->resetting == true);
4002  	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
4003  
4004  	poll_thread_times(0, 3);
4005  	CU_ASSERT(io_path11->qpair->qpair == NULL);
4006  	CU_ASSERT(io_path21->qpair->qpair != NULL);
4007  
4008  	poll_thread_times(1, 2);
4009  	CU_ASSERT(io_path11->qpair->qpair == NULL);
4010  	CU_ASSERT(io_path21->qpair->qpair == NULL);
4011  	CU_ASSERT(ctrlr1->is_failed == true);
4012  
4013  	poll_thread_times(0, 1);
4014  	CU_ASSERT(nvme_ctrlr1->resetting == true);
4015  	CU_ASSERT(ctrlr1->is_failed == false);
4016  	CU_ASSERT(ctrlr1->adminq.is_connected == false);
4017  	CU_ASSERT(curr_path1->is_failed == true);
4018  
4019  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4020  	poll_thread_times(0, 2);
4021  	CU_ASSERT(ctrlr1->adminq.is_connected == true);
4022  
4023  	poll_thread_times(0, 1);
4024  	CU_ASSERT(io_path11->qpair->qpair != NULL);
4025  	CU_ASSERT(io_path21->qpair->qpair == NULL);
4026  
4027  	poll_thread_times(1, 1);
4028  	CU_ASSERT(io_path11->qpair->qpair != NULL);
4029  	CU_ASSERT(io_path21->qpair->qpair != NULL);
4030  
4031  	poll_thread_times(0, 2);
4032  	CU_ASSERT(nvme_ctrlr1->resetting == true);
4033  	poll_thread_times(1, 1);
4034  	CU_ASSERT(nvme_ctrlr1->resetting == true);
4035  	poll_thread_times(0, 2);
4036  	CU_ASSERT(nvme_ctrlr1->resetting == false);
4037  	CU_ASSERT(curr_path1->is_failed == false);
4038  	CU_ASSERT(first_bio->io_path == io_path12);
4039  	CU_ASSERT(nvme_ctrlr2->resetting == true);
4040  
4041  	poll_thread_times(0, 3);
4042  	CU_ASSERT(io_path12->qpair->qpair == NULL);
4043  	CU_ASSERT(io_path22->qpair->qpair != NULL);
4044  
4045  	poll_thread_times(1, 2);
4046  	CU_ASSERT(io_path12->qpair->qpair == NULL);
4047  	CU_ASSERT(io_path22->qpair->qpair == NULL);
4048  	CU_ASSERT(ctrlr2->is_failed == true);
4049  
4050  	poll_thread_times(0, 1);
4051  	CU_ASSERT(nvme_ctrlr2->resetting == true);
4052  	CU_ASSERT(ctrlr2->is_failed == false);
4053  	CU_ASSERT(ctrlr2->adminq.is_connected == false);
4054  	CU_ASSERT(curr_path2->is_failed == true);
4055  
4056  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4057  	poll_thread_times(0, 2);
4058  	CU_ASSERT(ctrlr2->adminq.is_connected == true);
4059  
4060  	poll_thread_times(0, 1);
4061  	CU_ASSERT(io_path12->qpair->qpair != NULL);
4062  	CU_ASSERT(io_path22->qpair->qpair == NULL);
4063  
4064  	poll_thread_times(1, 2);
4065  	CU_ASSERT(io_path12->qpair->qpair != NULL);
4066  	CU_ASSERT(io_path22->qpair->qpair != NULL);
4067  
4068  	poll_thread_times(0, 2);
4069  	CU_ASSERT(nvme_ctrlr2->resetting == true);
4070  	poll_thread_times(1, 1);
4071  	CU_ASSERT(nvme_ctrlr2->resetting == true);
4072  	poll_thread_times(0, 2);
4073  	CU_ASSERT(first_bio->io_path == NULL);
4074  	CU_ASSERT(nvme_ctrlr2->resetting == false);
4075  	CU_ASSERT(curr_path2->is_failed == false);
4076  
4077  	poll_threads();
4078  
4079  	/* There is a race between two reset requests from bdev_io.
4080  	 *
4081  	 * The first reset request is submitted on thread 0, and the second reset
4082  	 * request is submitted on thread 1 while the first is resetting ctrlr1.
4083  	 * The second is pending on ctrlr1. After the first completes resetting ctrlr1,
4084  	 * both reset requests go to ctrlr2. The first arrives earlier than the second,
4085  	 * so the second is pending on ctrlr2 again. After the first completes resetting
4086  	 * ctrlr2, both complete successfully.
4087  	 */
4088  	ctrlr1->is_failed = true;
4089  	curr_path1->is_failed = true;
4090  	ctrlr2->is_failed = true;
4091  	curr_path2->is_failed = true;
4092  	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4093  	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
4094  
4095  	set_thread(0);
4096  
4097  	bdev_nvme_submit_request(ch1, first_bdev_io);
4098  
4099  	set_thread(1);
4100  
4101  	bdev_nvme_submit_request(ch2, second_bdev_io);
4102  
4103  	CU_ASSERT(nvme_ctrlr1->resetting == true);
4104  	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
4105  	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);
4106  
4107  	poll_threads();
4108  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4109  	poll_threads();
4110  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4111  	poll_threads();
4112  
4113  	CU_ASSERT(ctrlr1->is_failed == false);
4114  	CU_ASSERT(curr_path1->is_failed == false);
4115  	CU_ASSERT(ctrlr2->is_failed == false);
4116  	CU_ASSERT(curr_path2->is_failed == false);
4117  	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4118  	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4119  
4120  	set_thread(0);
4121  
4122  	spdk_put_io_channel(ch1);
4123  
4124  	set_thread(1);
4125  
4126  	spdk_put_io_channel(ch2);
4127  
4128  	poll_threads();
4129  
4130  	set_thread(0);
4131  
4132  	rc = bdev_nvme_delete("nvme0", &g_any_path);
4133  	CU_ASSERT(rc == 0);
4134  
4135  	poll_threads();
4136  	spdk_delay_us(1000);
4137  	poll_threads();
4138  
4139  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4140  
4141  	free(first_bdev_io);
4142  	free(second_bdev_io);
4143  }
4144  
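/* Verify the path selection rules of bdev_nvme_find_io_path(): a path is
 * usable only if its qpair is connected (nvme_qpair->qpair != NULL) and its
 * namespace reports an accessible ANA state, and an OPTIMIZED path is
 * preferred over a NON_OPTIMIZED one.
 */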
4145  static void
4146  test_find_io_path(void)
4147  {
4148  	struct nvme_bdev_channel nbdev_ch = {
4149  		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
4150  	};
4151  	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
4152  	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
4153  	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
4154  	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
4155  	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
4156  	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
4157  	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
4158  	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
4159  	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
4160  
4161  	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
4162  
4163  	/* Test if io_path whose ANA state is not accessible is excluded. */
4164  
4165  	nvme_qpair1.qpair = &qpair1;
4166  	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4167  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4168  
4169  	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
4170  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4171  
4172  	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
4173  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4174  
4175  	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4176  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4177  
4178  	nbdev_ch.current_io_path = NULL;
4179  
4180  	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4181  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4182  
4183  	nbdev_ch.current_io_path = NULL;
4184  
4185  	/* Test if io_path whose qpair is resetting is excluded. */
4186  
4187  	nvme_qpair1.qpair = NULL;
4188  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
4189  
4190  	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
4191  
4192  	/* Test that an ANA optimized path is preferred, and that otherwise the
4193  	 * first ANA non-optimized path found is used.
4194  	 */
4195  
4196  	nvme_qpair1.qpair = &qpair1;
4197  	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4198  	nvme_qpair2.qpair = &qpair2;
4199  	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4200  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
4201  
4202  	nbdev_ch.current_io_path = NULL;
4203  
4204  	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
4205  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
4206  
4207  	nbdev_ch.current_io_path = NULL;
4208  }
4209  
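/* Verify that an I/O submitted while the namespace ANA state is inaccessible
 * is queued on the retry list and is resubmitted once the ANA state becomes
 * accessible again.
 */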
4210  static void
4211  test_retry_io_if_ana_state_is_updating(void)
4212  {
4213  	struct nvme_path_id path = {};
4214  	struct nvme_ctrlr_opts opts = {};
4215  	struct spdk_nvme_ctrlr *ctrlr;
4216  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4217  	struct nvme_ctrlr *nvme_ctrlr;
4218  	const int STRING_SIZE = 32;
4219  	const char *attached_names[STRING_SIZE];
4220  	struct nvme_bdev *bdev;
4221  	struct nvme_ns *nvme_ns;
4222  	struct spdk_bdev_io *bdev_io1;
4223  	struct spdk_io_channel *ch;
4224  	struct nvme_bdev_channel *nbdev_ch;
4225  	struct nvme_io_path *io_path;
4226  	struct nvme_qpair *nvme_qpair;
4227  	int rc;
4228  
4229  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4230  	ut_init_trid(&path.trid);
4231  
4232  	set_thread(0);
4233  
4234  	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4235  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4236  
4237  	g_ut_attach_ctrlr_status = 0;
4238  	g_ut_attach_bdev_count = 1;
4239  
4240  	opts.ctrlr_loss_timeout_sec = -1;
4241  	opts.reconnect_delay_sec = 1;
4242  
4243  	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4244  			      attach_ctrlr_done, NULL, NULL, &opts, false);
4245  	CU_ASSERT(rc == 0);
4246  
4247  	spdk_delay_us(1000);
4248  	poll_threads();
4249  
4250  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4251  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4252  
4253  	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4254  	CU_ASSERT(nvme_ctrlr != NULL);
4255  
4256  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4257  	CU_ASSERT(bdev != NULL);
4258  
4259  	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4260  	CU_ASSERT(nvme_ns != NULL);
4261  
4262  	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4263  	ut_bdev_io_set_buf(bdev_io1);
4264  
4265  	ch = spdk_get_io_channel(bdev);
4266  	SPDK_CU_ASSERT_FATAL(ch != NULL);
4267  
4268  	nbdev_ch = spdk_io_channel_get_ctx(ch);
4269  
4270  	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4271  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4272  
4273  	nvme_qpair = io_path->qpair;
4274  	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4275  	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4276  
4277  	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
4278  
4279  	/* If qpair is connected, I/O should succeed. */
4280  	bdev_io1->internal.in_submit_request = true;
4281  
4282  	bdev_nvme_submit_request(ch, bdev_io1);
4283  	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4284  
4285  	poll_threads();
4286  	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4287  	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4288  
4289  	/* If ANA state of namespace is inaccessible, I/O should be queued. */
4290  	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4291  	nbdev_ch->current_io_path = NULL;
4292  
4293  	bdev_io1->internal.in_submit_request = true;
4294  
4295  	bdev_nvme_submit_request(ch, bdev_io1);
4296  
4297  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4298  	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4299  	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4300  
4301  	/* ANA state became accessible while I/O was queued. */
4302  	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4303  
4304  	spdk_delay_us(1000000);
4305  
4306  	poll_thread_times(0, 1);
4307  
4308  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4309  	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
4310  	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
4311  
4312  	poll_threads();
4313  
4314  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4315  	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
4316  	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4317  
4318  	free(bdev_io1);
4319  
4320  	spdk_put_io_channel(ch);
4321  
4322  	poll_threads();
4323  
4324  	rc = bdev_nvme_delete("nvme0", &g_any_path);
4325  	CU_ASSERT(rc == 0);
4326  
4327  	poll_threads();
4328  	spdk_delay_us(1000);
4329  	poll_threads();
4330  
4331  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4332  }
4333  
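/* Verify I/O retry on path errors: a path error with DNR set fails the I/O
 * immediately, a transient path error is retried on the same path, and an
 * I/O aborted by SQ deletion is retried on another available path.
 */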
4334  static void
4335  test_retry_io_for_io_path_error(void)
4336  {
4337  	struct nvme_path_id path1 = {}, path2 = {};
4338  	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
4339  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4340  	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
4341  	const int STRING_SIZE = 32;
4342  	const char *attached_names[STRING_SIZE];
4343  	struct nvme_bdev *bdev;
4344  	struct nvme_ns *nvme_ns1, *nvme_ns2;
4345  	struct spdk_bdev_io *bdev_io;
4346  	struct nvme_bdev_io *bio;
4347  	struct spdk_io_channel *ch;
4348  	struct nvme_bdev_channel *nbdev_ch;
4349  	struct nvme_io_path *io_path1, *io_path2;
4350  	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
4351  	struct ut_nvme_req *req;
4352  	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
4353  	int rc;
4354  
4355  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4356  	ut_init_trid(&path1.trid);
4357  	ut_init_trid2(&path2.trid);
4358  
4359  	g_opts.bdev_retry_count = 1;
4360  
4361  	set_thread(0);
4362  
4363  	g_ut_attach_ctrlr_status = 0;
4364  	g_ut_attach_bdev_count = 1;
4365  
4366  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
4367  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
4368  
4369  	ctrlr1->ns[0].uuid = &uuid1;
4370  
4371  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
4372  			      attach_ctrlr_done, NULL, NULL, NULL, true);
4373  	CU_ASSERT(rc == 0);
4374  
4375  	spdk_delay_us(1000);
4376  	poll_threads();
4377  
4378  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4379  	poll_threads();
4380  
4381  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4382  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4383  
4384  	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
4385  	CU_ASSERT(nvme_ctrlr1 != NULL);
4386  
4387  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4388  	CU_ASSERT(bdev != NULL);
4389  
4390  	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
4391  	CU_ASSERT(nvme_ns1 != NULL);
4392  	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));
4393  
4394  	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4395  	ut_bdev_io_set_buf(bdev_io);
4396  
4397  	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4398  
4399  	ch = spdk_get_io_channel(bdev);
4400  	SPDK_CU_ASSERT_FATAL(ch != NULL);
4401  
4402  	nbdev_ch = spdk_io_channel_get_ctx(ch);
4403  
4404  	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
4405  	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
4406  
4407  	nvme_qpair1 = io_path1->qpair;
4408  	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
4409  	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
4410  
4411  	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4412  
4413  	/* I/O got a temporary I/O path error, but it should not be retried if DNR is set. */
4414  	bdev_io->internal.in_submit_request = true;
4415  
4416  	bdev_nvme_submit_request(ch, bdev_io);
4417  
4418  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4419  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4420  
4421  	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4422  	SPDK_CU_ASSERT_FATAL(req != NULL);
4423  
4424  	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4425  	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4426  	req->cpl.status.dnr = 1;
4427  
4428  	poll_thread_times(0, 1);
4429  
4430  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4431  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4432  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4433  
4434  	/* I/O got a temporary I/O path error, but it should succeed after a retry. */
4435  	bdev_io->internal.in_submit_request = true;
4436  
4437  	bdev_nvme_submit_request(ch, bdev_io);
4438  
4439  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4440  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4441  
4442  	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4443  	SPDK_CU_ASSERT_FATAL(req != NULL);
4444  
4445  	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
4446  	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4447  
4448  	poll_thread_times(0, 1);
4449  
4450  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4451  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4452  	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4453  
4454  	poll_threads();
4455  
4456  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4457  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4458  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4459  
4460  	/* Add io_path2 dynamically, and create a multipath configuration. */
4461  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
4462  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
4463  
4464  	ctrlr2->ns[0].uuid = &uuid1;
4465  
4466  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
4467  			      attach_ctrlr_done, NULL, NULL, NULL, true);
4468  	CU_ASSERT(rc == 0);
4469  
4470  	spdk_delay_us(1000);
4471  	poll_threads();
4472  
4473  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4474  	poll_threads();
4475  
4476  	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
4477  	CU_ASSERT(nvme_ctrlr2 != NULL);
4478  
4479  	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
4480  	CU_ASSERT(nvme_ns2 != NULL);
4481  	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));
4482  
4483  	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
4484  	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
4485  
4486  	nvme_qpair2 = io_path2->qpair;
4487  	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
4488  	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
4489  
4490  	/* I/O is submitted to io_path1, but the qpair of io_path1 is disconnected
4491  	 * and deleted, so the I/O is aborted. io_path2 is still available, so after
4492  	 * a retry the I/O is submitted to io_path2 and should succeed.
4493  	 */
4494  	bdev_io->internal.in_submit_request = true;
4495  
4496  	bdev_nvme_submit_request(ch, bdev_io);
4497  
4498  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
4499  	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4500  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4501  
4502  	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
4503  	SPDK_CU_ASSERT_FATAL(req != NULL);
4504  
4505  	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
4506  	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4507  
4508  	poll_thread_times(0, 1);
4509  
4510  	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
4511  	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4512  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4513  	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4514  
4515  	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
4516  	nvme_qpair1->qpair = NULL;
4517  
4518  	poll_threads();
4519  
4520  	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
4521  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4522  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4523  
4524  	free(bdev_io);
4525  
4526  	spdk_put_io_channel(ch);
4527  
4528  	poll_threads();
4529  
4530  	rc = bdev_nvme_delete("nvme0", &g_any_path);
4531  	CU_ASSERT(rc == 0);
4532  
4533  	poll_threads();
4534  	spdk_delay_us(1000);
4535  	poll_threads();
4536  
4537  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4538  
4539  	g_opts.bdev_retry_count = 0;
4540  }
4541  
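/* Verify how g_opts.bdev_retry_count bounds retries. Roughly, an I/O aborted
 * by request is never retried, and a failed I/O is requeued only while
 * bio->retry_count < g_opts.bdev_retry_count, with -1 meaning no limit.
 */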
4542  static void
4543  test_retry_io_count(void)
4544  {
4545  	struct nvme_path_id path = {};
4546  	struct spdk_nvme_ctrlr *ctrlr;
4547  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4548  	struct nvme_ctrlr *nvme_ctrlr;
4549  	const int STRING_SIZE = 32;
4550  	const char *attached_names[STRING_SIZE];
4551  	struct nvme_bdev *bdev;
4552  	struct nvme_ns *nvme_ns;
4553  	struct spdk_bdev_io *bdev_io;
4554  	struct nvme_bdev_io *bio;
4555  	struct spdk_io_channel *ch;
4556  	struct nvme_bdev_channel *nbdev_ch;
4557  	struct nvme_io_path *io_path;
4558  	struct nvme_qpair *nvme_qpair;
4559  	struct ut_nvme_req *req;
4560  	int rc;
4561  
4562  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4563  	ut_init_trid(&path.trid);
4564  
4565  	set_thread(0);
4566  
4567  	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
4568  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4569  
4570  	g_ut_attach_ctrlr_status = 0;
4571  	g_ut_attach_bdev_count = 1;
4572  
4573  	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4574  			      attach_ctrlr_done, NULL, NULL, NULL, false);
4575  	CU_ASSERT(rc == 0);
4576  
4577  	spdk_delay_us(1000);
4578  	poll_threads();
4579  
4580  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4581  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4582  
4583  	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4584  	CU_ASSERT(nvme_ctrlr != NULL);
4585  
4586  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4587  	CU_ASSERT(bdev != NULL);
4588  
4589  	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4590  	CU_ASSERT(nvme_ns != NULL);
4591  
4592  	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4593  	ut_bdev_io_set_buf(bdev_io);
4594  
4595  	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4596  
4597  	ch = spdk_get_io_channel(bdev);
4598  	SPDK_CU_ASSERT_FATAL(ch != NULL);
4599  
4600  	nbdev_ch = spdk_io_channel_get_ctx(ch);
4601  
4602  	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4603  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4604  
4605  	nvme_qpair = io_path->qpair;
4606  	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4607  	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4608  
4609  	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4610  
4611  	/* If I/O is aborted by request, it should not be retried. */
4612  	g_opts.bdev_retry_count = 1;
4613  
4614  	bdev_io->internal.in_submit_request = true;
4615  
4616  	bdev_nvme_submit_request(ch, bdev_io);
4617  
4618  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4619  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4620  
4621  	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4622  	SPDK_CU_ASSERT_FATAL(req != NULL);
4623  
4624  	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4625  	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4626  
4627  	poll_thread_times(0, 1);
4628  
4629  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4630  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4631  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
4632  
4633  	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
4634  	 * the failed I/O should not be retried.
4635  	 */
4636  	g_opts.bdev_retry_count = 4;
4637  
4638  	bdev_io->internal.in_submit_request = true;
4639  
4640  	bdev_nvme_submit_request(ch, bdev_io);
4641  
4642  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4643  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4644  
4645  	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4646  	SPDK_CU_ASSERT_FATAL(req != NULL);
4647  
4648  	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4649  	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4650  	bio->retry_count = 4;
4651  
4652  	poll_thread_times(0, 1);
4653  
4654  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4655  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4656  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
4657  
4658  	/* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried. */
4659  	g_opts.bdev_retry_count = -1;
4660  
4661  	bdev_io->internal.in_submit_request = true;
4662  
4663  	bdev_nvme_submit_request(ch, bdev_io);
4664  
4665  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4666  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4667  
4668  	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4669  	SPDK_CU_ASSERT_FATAL(req != NULL);
4670  
4671  	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4672  	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4673  	bio->retry_count = 4;
4674  
4675  	poll_thread_times(0, 1);
4676  
4677  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4678  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4679  	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4680  
4681  	poll_threads();
4682  
4683  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4684  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4685  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4686  
4687  	/* If bio->retry_count is less than g_opts.bdev_retry_count,
4688  	 * the failed I/O should be retried.
4689  	 */
4690  	g_opts.bdev_retry_count = 4;
4691  
4692  	bdev_io->internal.in_submit_request = true;
4693  
4694  	bdev_nvme_submit_request(ch, bdev_io);
4695  
4696  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4697  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4698  
4699  	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4700  	SPDK_CU_ASSERT_FATAL(req != NULL);
4701  
4702  	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
4703  	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4704  	bio->retry_count = 3;
4705  
4706  	poll_thread_times(0, 1);
4707  
4708  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4709  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4710  	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4711  
4712  	poll_threads();
4713  
4714  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4715  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4716  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4717  
4718  	free(bdev_io);
4719  
4720  	spdk_put_io_channel(ch);
4721  
4722  	poll_threads();
4723  
4724  	rc = bdev_nvme_delete("nvme0", &g_any_path);
4725  	CU_ASSERT(rc == 0);
4726  
4727  	poll_threads();
4728  	spdk_delay_us(1000);
4729  	poll_threads();
4730  
4731  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4732  
4733  	g_opts.bdev_retry_count = 0;
4734  }
4735  
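/* Verify that at most one ANA log page read is outstanding per ctrlr: a
 * concurrent read from any thread is rejected, a reset may still proceed
 * while a read is in progress, but a read issued during a reset is rejected.
 */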
4736  static void
4737  test_concurrent_read_ana_log_page(void)
4738  {
4739  	struct spdk_nvme_transport_id trid = {};
4740  	struct spdk_nvme_ctrlr *ctrlr;
4741  	struct nvme_ctrlr *nvme_ctrlr;
4742  	const int STRING_SIZE = 32;
4743  	const char *attached_names[STRING_SIZE];
4744  	int rc;
4745  
4746  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4747  	ut_init_trid(&trid);
4748  
4749  	set_thread(0);
4750  
4751  	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
4752  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4753  
4754  	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
4755  
4756  	g_ut_attach_ctrlr_status = 0;
4757  	g_ut_attach_bdev_count = 1;
4758  
4759  	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
4760  			      attach_ctrlr_done, NULL, NULL, NULL, false);
4761  	CU_ASSERT(rc == 0);
4762  
4763  	spdk_delay_us(1000);
4764  	poll_threads();
4765  
4766  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4767  	poll_threads();
4768  
4769  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
4770  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
4771  
4772  	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4773  
4774  	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4775  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4776  
4777  	/* Subsequent read requests should be rejected. */
4778  	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4779  
4780  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4781  
4782  	set_thread(1);
4783  
4784  	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4785  
4786  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
4787  
4788  	/* A reset request while reading the ANA log page should not be rejected. */
4789  	rc = bdev_nvme_reset(nvme_ctrlr);
4790  	CU_ASSERT(rc == 0);
4791  
4792  	poll_threads();
4793  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4794  	poll_threads();
4795  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4796  	poll_threads();
4797  
4798  	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4799  	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
4800  
4801  	/* Reading the ANA log page while the ctrlr is resetting should be rejected. */
4802  	rc = bdev_nvme_reset(nvme_ctrlr);
4803  	CU_ASSERT(rc == 0);
4804  
4805  	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
4806  
4807  	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4808  
4809  	poll_threads();
4810  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4811  	poll_threads();
4812  
4813  	set_thread(0);
4814  
4815  	rc = bdev_nvme_delete("nvme0", &g_any_path);
4816  	CU_ASSERT(rc == 0);
4817  
4818  	poll_threads();
4819  	spdk_delay_us(1000);
4820  	poll_threads();
4821  
4822  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
4823  }
4824  
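/* Verify that an I/O failing with an ANA error is queued for retry while the
 * namespace is frozen and the ANA log page is re-read, and that it succeeds
 * once the namespace becomes accessible again.
 */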
4825  static void
4826  test_retry_io_for_ana_error(void)
4827  {
4828  	struct nvme_path_id path = {};
4829  	struct spdk_nvme_ctrlr *ctrlr;
4830  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
4831  	struct nvme_ctrlr *nvme_ctrlr;
4832  	const int STRING_SIZE = 32;
4833  	const char *attached_names[STRING_SIZE];
4834  	struct nvme_bdev *bdev;
4835  	struct nvme_ns *nvme_ns;
4836  	struct spdk_bdev_io *bdev_io;
4837  	struct nvme_bdev_io *bio;
4838  	struct spdk_io_channel *ch;
4839  	struct nvme_bdev_channel *nbdev_ch;
4840  	struct nvme_io_path *io_path;
4841  	struct nvme_qpair *nvme_qpair;
4842  	struct ut_nvme_req *req;
4843  	uint64_t now;
4844  	int rc;
4845  
4846  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
4847  	ut_init_trid(&path.trid);
4848  
4849  	g_opts.bdev_retry_count = 1;
4850  
4851  	set_thread(0);
4852  
4853  	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
4854  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
4855  
4856  	g_ut_attach_ctrlr_status = 0;
4857  	g_ut_attach_bdev_count = 1;
4858  
4859  	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
4860  			      attach_ctrlr_done, NULL, NULL, NULL, false);
4861  	CU_ASSERT(rc == 0);
4862  
4863  	spdk_delay_us(1000);
4864  	poll_threads();
4865  
4866  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4867  	poll_threads();
4868  
4869  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
4870  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
4871  
4872  	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
4873  	CU_ASSERT(nvme_ctrlr != NULL);
4874  
4875  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
4876  	CU_ASSERT(bdev != NULL);
4877  
4878  	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
4879  	CU_ASSERT(nvme_ns != NULL);
4880  
4881  	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
4882  	ut_bdev_io_set_buf(bdev_io);
4883  
4884  	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
4885  
4886  	ch = spdk_get_io_channel(bdev);
4887  	SPDK_CU_ASSERT_FATAL(ch != NULL);
4888  
4889  	nbdev_ch = spdk_io_channel_get_ctx(ch);
4890  
4891  	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
4892  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
4893  
4894  	nvme_qpair = io_path->qpair;
4895  	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
4896  	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
4897  
4898  	now = spdk_get_ticks();
4899  
4900  	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
4901  
4902  	/* If an I/O gets an ANA error, it should be queued, the corresponding
4903  	 * namespace should be frozen, and its ANA state should be updated.
4904  	 */
4905  	bdev_io->internal.in_submit_request = true;
4906  
4907  	bdev_nvme_submit_request(ch, bdev_io);
4908  
4909  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
4910  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4911  
4912  	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
4913  	SPDK_CU_ASSERT_FATAL(req != NULL);
4914  
4915  	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
4916  	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
4917  	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
4918  
4919  	poll_thread_times(0, 1);
4920  
4921  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4922  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4923  	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4924  	/* I/O should be retried immediately. */
4925  	CU_ASSERT(bio->retry_ticks == now);
4926  	CU_ASSERT(nvme_ns->ana_state_updating == true);
4927  	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
4928  
4929  	poll_threads();
4930  
4931  	/* Namespace is inaccessible, and hence I/O should be queued again. */
4932  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4933  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
4934  	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
4935  	/* I/O should be retried after a second if no I/O path was found but
4936  	 * one may become available.
4937  	 */
4938  	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());
4939  
4940  	/* The namespace should be unfrozen once its ANA state update completes. */
4941  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
4942  	poll_threads();
4943  
4944  	CU_ASSERT(nvme_ns->ana_state_updating == false);
4945  	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
4946  	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
4947  
4948  	/* Retrying the queued I/O should succeed. */
4949  	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
4950  	poll_threads();
4951  
4952  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
4953  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
4954  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
4955  
4956  	free(bdev_io);
4957  
4958  	spdk_put_io_channel(ch);
4959  
4960  	poll_threads();
4961  
4962  	rc = bdev_nvme_delete("nvme0", &g_any_path);
4963  	CU_ASSERT(rc == 0);
4964  
4965  	poll_threads();
4966  	spdk_delay_us(1000);
4967  	poll_threads();
4968  
4969  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
4970  
4971  	g_opts.bdev_retry_count = 0;
4972  }
4973  
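/* Exercise bdev_nvme_check_io_error_resiliency_params() with valid and
 * invalid combinations of the three parameters; the expected rules are
 * encoded by the asserts below.
 */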
4974  static void
4975  test_check_io_error_resiliency_params(void)
4976  {
4977  	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
4978  	 * 3rd parameter is fast_io_fail_timeout_sec.
4979  	 */
4980  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
4981  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
4982  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
4983  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
4984  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
4985  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
4986  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
4987  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
4988  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
4989  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
4990  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
4991  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
4992  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
4993  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
4994  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
4995  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
4996  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
4997  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
4998  	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
4999  }
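
/* A minimal sketch of the rules the assertions above encode, derived from those
 * assertions only (the authoritative implementation is
 * bdev_nvme_check_io_error_resiliency_params() in bdev_nvme.c; this helper is
 * illustrative and unused):
 * - ctrlr_loss_timeout_sec < -1 is invalid; -1 means "retry forever".
 * - A nonzero ctrlr_loss_timeout_sec requires a nonzero reconnect_delay_sec,
 *   and a positive one must not be smaller than reconnect_delay_sec.
 * - A nonzero fast_io_fail_timeout_sec must lie between reconnect_delay_sec
 *   and a positive ctrlr_loss_timeout_sec.
 */
static bool
ut_check_io_error_resiliency_params_sketch(int32_t ctrlr_loss_timeout_sec,
		uint32_t reconnect_delay_sec, uint32_t fast_io_fail_timeout_sec)
{
	if (ctrlr_loss_timeout_sec < -1) {
		return false;
	} else if (ctrlr_loss_timeout_sec == -1) {
		if (reconnect_delay_sec == 0) {
			return false;
		} else if (fast_io_fail_timeout_sec != 0 &&
			   fast_io_fail_timeout_sec < reconnect_delay_sec) {
			return false;
		}
	} else if (ctrlr_loss_timeout_sec != 0) {
		if (reconnect_delay_sec == 0 ||
		    reconnect_delay_sec > (uint32_t)ctrlr_loss_timeout_sec) {
			return false;
		} else if (fast_io_fail_timeout_sec != 0 &&
			   (fast_io_fail_timeout_sec < reconnect_delay_sec ||
			    fast_io_fail_timeout_sec > (uint32_t)ctrlr_loss_timeout_sec)) {
			return false;
		}
	} else if (reconnect_delay_sec != 0 || fast_io_fail_timeout_sec != 0) {
		return false;
	}

	return true;
}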
5000  
5001  static void
5002  test_retry_io_if_ctrlr_is_resetting(void)
5003  {
5004  	struct nvme_path_id path = {};
5005  	struct nvme_ctrlr_opts opts = {};
5006  	struct spdk_nvme_ctrlr *ctrlr;
5007  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5008  	struct nvme_ctrlr *nvme_ctrlr;
5009  	const int STRING_SIZE = 32;
5010  	const char *attached_names[STRING_SIZE];
5011  	struct nvme_bdev *bdev;
5012  	struct nvme_ns *nvme_ns;
5013  	struct spdk_bdev_io *bdev_io1, *bdev_io2;
5014  	struct spdk_io_channel *ch;
5015  	struct nvme_bdev_channel *nbdev_ch;
5016  	struct nvme_io_path *io_path;
5017  	struct nvme_qpair *nvme_qpair;
5018  	int rc;
5019  
5020  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5021  	ut_init_trid(&path.trid);
5022  
5023  	set_thread(0);
5024  
5025  	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5026  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5027  
5028  	g_ut_attach_ctrlr_status = 0;
5029  	g_ut_attach_bdev_count = 1;
5030  
5031  	opts.ctrlr_loss_timeout_sec = -1;
5032  	opts.reconnect_delay_sec = 1;
5033  
5034  	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5035  			      attach_ctrlr_done, NULL, NULL, &opts, false);
5036  	CU_ASSERT(rc == 0);
5037  
5038  	spdk_delay_us(1000);
5039  	poll_threads();
5040  
5041  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5042  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5043  
5044  	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5045  	CU_ASSERT(nvme_ctrlr != NULL);
5046  
5047  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5048  	CU_ASSERT(bdev != NULL);
5049  
5050  	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5051  	CU_ASSERT(nvme_ns != NULL);
5052  
5053  	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5054  	ut_bdev_io_set_buf(bdev_io1);
5055  
5056  	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
5057  	ut_bdev_io_set_buf(bdev_io2);
5058  
5059  	ch = spdk_get_io_channel(bdev);
5060  	SPDK_CU_ASSERT_FATAL(ch != NULL);
5061  
5062  	nbdev_ch = spdk_io_channel_get_ctx(ch);
5063  
5064  	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5065  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5066  
5067  	nvme_qpair = io_path->qpair;
5068  	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
5069  	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
5070  
5071  	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
5072  	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
5073  
5074  	/* If qpair is connected, I/O should succeed. */
5075  	bdev_io1->internal.in_submit_request = true;
5076  
5077  	bdev_nvme_submit_request(ch, bdev_io1);
5078  	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5079  
5080  	poll_threads();
5081  	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5082  	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5083  
5084  	/* If the qpair is disconnected, it is freed and then reconnected by resetting
5085  	 * the corresponding nvme_ctrlr. An I/O submitted while the nvme_ctrlr is
5086  	 * resetting should be queued.
5087  	 */
5088  	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
5089  	ctrlr->is_failed = true;
5090  
5091  	poll_thread_times(0, 5);
5092  
5093  	CU_ASSERT(nvme_qpair->qpair == NULL);
5094  	CU_ASSERT(nvme_ctrlr->resetting == true);
5095  	CU_ASSERT(ctrlr->is_failed == false);
5096  
5097  	bdev_io1->internal.in_submit_request = true;
5098  
5099  	bdev_nvme_submit_request(ch, bdev_io1);
5100  
5101  	spdk_delay_us(1);
5102  
5103  	bdev_io2->internal.in_submit_request = true;
5104  
5105  	bdev_nvme_submit_request(ch, bdev_io2);
5106  
5107  	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5108  	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5109  	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5110  	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));
5111  
5112  	poll_threads();
5113  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5114  	poll_threads();
5115  
5116  	CU_ASSERT(nvme_qpair->qpair != NULL);
5117  	CU_ASSERT(nvme_ctrlr->resetting == false);
5118  
5119  	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);
5120  
5121  	poll_thread_times(0, 1);
5122  
5123  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5124  	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
5125  	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5126  	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5127  
5128  	poll_threads();
5129  
5130  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5131  	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
5132  	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5133  	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5134  	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5135  
5136  	spdk_delay_us(1);
5137  
5138  	poll_thread_times(0, 1);
5139  
5140  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
5141  	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
5142  	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5143  
5144  	poll_threads();
5145  
5146  	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
5147  	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
5148  	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
5149  
5150  	free(bdev_io1);
5151  	free(bdev_io2);
5152  
5153  	spdk_put_io_channel(ch);
5154  
5155  	poll_threads();
5156  
5157  	rc = bdev_nvme_delete("nvme0", &g_any_path);
5158  	CU_ASSERT(rc == 0);
5159  
5160  	poll_threads();
5161  	spdk_delay_us(1000);
5162  	poll_threads();
5163  
5164  	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
5165  }
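
/* A minimal sketch of the submit-time gate exercised above (illustrative only,
 * not the actual bdev_nvme code): while the ctrlr is resetting, the io_path has
 * no connected qpair, so a submitted bdev_io is parked on
 * nbdev_ch->retry_io_list instead of being failed, and is resubmitted once the
 * reset completes.
 */
static bool
ut_can_submit_now_sketch(struct nvme_io_path *io_path)
{
	/* A NULL qpair means the path is being reconnected via ctrlr reset. */
	return io_path->qpair->qpair != NULL;
}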
5166  
5167  static void
5168  test_reconnect_ctrlr(void)
5169  {
5170  	struct spdk_nvme_transport_id trid = {};
5171  	struct spdk_nvme_ctrlr ctrlr = {};
5172  	struct nvme_ctrlr *nvme_ctrlr;
5173  	struct spdk_io_channel *ch1, *ch2;
5174  	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
5175  	int rc;
5176  
5177  	ut_init_trid(&trid);
5178  	TAILQ_INIT(&ctrlr.active_io_qpairs);
5179  
5180  	set_thread(0);
5181  
5182  	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
5183  	CU_ASSERT(rc == 0);
5184  
5185  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5186  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5187  
5188  	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
5189  	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5190  
5191  	ch1 = spdk_get_io_channel(nvme_ctrlr);
5192  	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
5193  
5194  	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
5195  	CU_ASSERT(ctrlr_ch1->qpair != NULL);
5196  
5197  	set_thread(1);
5198  
5199  	ch2 = spdk_get_io_channel(nvme_ctrlr);
5200  	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
5201  
5202  	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
5203  
5204  	/* Reset starts from thread 1. */
5205  	set_thread(1);
5206  
5207  	/* The reset should fail and a reconnect timer should be registered. */
5208  	ctrlr.fail_reset = true;
5209  	ctrlr.is_failed = true;
5210  
5211  	rc = bdev_nvme_reset(nvme_ctrlr);
5212  	CU_ASSERT(rc == 0);
5213  	CU_ASSERT(nvme_ctrlr->resetting == true);
5214  	CU_ASSERT(ctrlr.is_failed == true);
5215  
5216  	poll_threads();
5217  
5218  	CU_ASSERT(nvme_ctrlr->resetting == false);
5219  	CU_ASSERT(ctrlr.is_failed == false);
5220  	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5221  	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5222  	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5223  	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5224  
5225  	/* Then a reconnect retry should succeed. */
5226  	ctrlr.fail_reset = false;
5227  
5228  	spdk_delay_us(SPDK_SEC_TO_USEC);
5229  	poll_thread_times(0, 1);
5230  
5231  	CU_ASSERT(nvme_ctrlr->resetting == true);
5232  	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5233  
5234  	poll_threads();
5235  
5236  	CU_ASSERT(nvme_ctrlr->resetting == false);
5237  	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
5238  	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
5239  	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5240  
5241  	/* The reset should fail and a reconnect timer should be registered. */
5242  	ctrlr.fail_reset = true;
5243  	ctrlr.is_failed = true;
5244  
5245  	rc = bdev_nvme_reset(nvme_ctrlr);
5246  	CU_ASSERT(rc == 0);
5247  	CU_ASSERT(nvme_ctrlr->resetting == true);
5248  	CU_ASSERT(ctrlr.is_failed == true);
5249  
5250  	poll_threads();
5251  
5252  	CU_ASSERT(nvme_ctrlr->resetting == false);
5253  	CU_ASSERT(ctrlr.is_failed == false);
5254  	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5255  	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5256  	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5257  	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5258  
5259  	/* Then a reconnect retry should still fail. */
5260  	spdk_delay_us(SPDK_SEC_TO_USEC);
5261  	poll_thread_times(0, 1);
5262  
5263  	CU_ASSERT(nvme_ctrlr->resetting == true);
5264  	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5265  
5266  	poll_threads();
5267  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5268  	poll_threads();
5269  
5270  	CU_ASSERT(nvme_ctrlr->resetting == false);
5271  	CU_ASSERT(ctrlr.is_failed == false);
5272  	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
5273  	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
5274  	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5275  
5276  	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
5277  	spdk_delay_us(SPDK_SEC_TO_USEC);
5278  	poll_threads();
5279  
5280  	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5281  	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5282  	CU_ASSERT(nvme_ctrlr->destruct == true);
5283  
5284  	spdk_put_io_channel(ch2);
5285  
5286  	set_thread(0);
5287  
5288  	spdk_put_io_channel(ch1);
5289  
5290  	poll_threads();
5291  	spdk_delay_us(1000);
5292  	poll_threads();
5293  
5294  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5295  }
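
/* A minimal sketch of the ctrlr loss check asserted above, consistent with this
 * test's timeline (the authoritative implementation is
 * bdev_nvme_check_ctrlr_loss_timeout() in bdev_nvme.c): the ctrlr is given up
 * once ctrlr_loss_timeout_sec has elapsed since reset_start_tsc, while -1 means
 * reconnect retries continue forever.
 */
static bool
ut_ctrlr_loss_timedout_sketch(const struct nvme_ctrlr *nvme_ctrlr)
{
	uint64_t elapsed_ticks;

	if (nvme_ctrlr->opts.ctrlr_loss_timeout_sec == -1) {
		return false;
	}

	elapsed_ticks = spdk_get_ticks() - nvme_ctrlr->reset_start_tsc;

	return elapsed_ticks >
	       (uint64_t)nvme_ctrlr->opts.ctrlr_loss_timeout_sec * spdk_get_ticks_hz();
}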
5296  
5297  static struct nvme_path_id *
5298  ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
5299  		       const struct spdk_nvme_transport_id *trid)
5300  {
5301  	struct nvme_path_id *p;
5302  
5303  	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
5304  		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
5305  			break;
5306  		}
5307  	}
5308  
5309  	return p;
5310  }
5311  
5312  static void
5313  test_retry_failover_ctrlr(void)
5314  {
5315  	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
5316  	struct spdk_nvme_ctrlr ctrlr = {};
5317  	struct nvme_ctrlr *nvme_ctrlr = NULL;
5318  	struct nvme_path_id *path_id1, *path_id2, *path_id3;
5319  	struct spdk_io_channel *ch;
5320  	struct nvme_ctrlr_channel *ctrlr_ch;
5321  	int rc;
5322  
5323  	ut_init_trid(&trid1);
5324  	ut_init_trid2(&trid2);
5325  	ut_init_trid3(&trid3);
5326  	TAILQ_INIT(&ctrlr.active_io_qpairs);
5327  
5328  	set_thread(0);
5329  
5330  	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
5331  	CU_ASSERT(rc == 0);
5332  
5333  	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
5334  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
5335  
5336  	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
5337  	nvme_ctrlr->opts.reconnect_delay_sec = 1;
5338  
5339  	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
5340  	CU_ASSERT(rc == 0);
5341  
5342  	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
5343  	CU_ASSERT(rc == 0);
5344  
5345  	ch = spdk_get_io_channel(nvme_ctrlr);
5346  	SPDK_CU_ASSERT_FATAL(ch != NULL);
5347  
5348  	ctrlr_ch = spdk_io_channel_get_ctx(ch);
5349  
5350  	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
5351  	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
5352  	CU_ASSERT(path_id1->is_failed == false);
5353  	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
5354  
5355  	/* If the reset fails and a reconnect is scheduled, the active path_id is switched from trid1 to trid2. */
5356  	ctrlr.fail_reset = true;
5357  	ctrlr.is_failed = true;
5358  
5359  	rc = bdev_nvme_reset(nvme_ctrlr);
5360  	CU_ASSERT(rc == 0);
5361  
5362  	poll_threads();
5363  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5364  	poll_threads();
5365  
5366  	CU_ASSERT(nvme_ctrlr->resetting == false);
5367  	CU_ASSERT(ctrlr.is_failed == false);
5368  	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5369  	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5370  	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
5371  	CU_ASSERT(path_id1->is_failed == true);
5375  
5376  	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
5377  	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
5378  	CU_ASSERT(path_id2->is_failed == false);
5379  	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
5380  
5381  	/* If trid2 is removed while a reconnect is scheduled, the active path_id is
5382  	 * switched to trid3 but no reset is started.
5383  	 */
5384  	rc = bdev_nvme_failover(nvme_ctrlr, true);
5385  	CU_ASSERT(rc == 0);
5386  
5387  	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) == NULL);
5388  
5389  	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
5390  	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
5391  	CU_ASSERT(path_id3->is_failed == false);
5392  	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5393  
5394  	CU_ASSERT(nvme_ctrlr->resetting == false);
5395  
5396  	/* If the reconnect succeeds, trid3 should remain the active path_id. */
5397  	ctrlr.fail_reset = false;
5398  
5399  	spdk_delay_us(SPDK_SEC_TO_USEC);
5400  	poll_thread_times(0, 1);
5401  
5402  	CU_ASSERT(nvme_ctrlr->resetting == true);
5403  	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);
5404  
5405  	poll_threads();
5406  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5407  	poll_threads();
5408  
5409  	CU_ASSERT(path_id3->is_failed == false);
5410  	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
5411  	CU_ASSERT(nvme_ctrlr->resetting == false);
5412  	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
5413  	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
5414  
5415  	spdk_put_io_channel(ch);
5416  
5417  	poll_threads();
5418  
5419  	rc = bdev_nvme_delete("nvme0", &g_any_path);
5420  	CU_ASSERT(rc == 0);
5421  
5422  	poll_threads();
5423  	spdk_delay_us(1000);
5424  	poll_threads();
5425  
5426  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5427  }
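
/* A minimal sketch of the path rotation observed above (an illustration of the
 * behavior only; the real rotation happens inside bdev_nvme.c when a reset
 * fails or a path is removed): the failing active trid is marked and the next
 * trid in the list becomes the active path_id.
 */
static void
ut_rotate_active_path_sketch(struct nvme_ctrlr *nvme_ctrlr)
{
	struct nvme_path_id *cur = nvme_ctrlr->active_path_id;
	struct nvme_path_id *next = TAILQ_NEXT(cur, link);

	cur->is_failed = true;
	if (next != NULL) {
		nvme_ctrlr->active_path_id = next;
	}
}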
5428  
5429  static void
5430  test_fail_path(void)
5431  {
5432  	struct nvme_path_id path = {};
5433  	struct nvme_ctrlr_opts opts = {};
5434  	struct spdk_nvme_ctrlr *ctrlr;
5435  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5436  	struct nvme_ctrlr *nvme_ctrlr;
5437  	const int STRING_SIZE = 32;
5438  	const char *attached_names[STRING_SIZE];
5439  	struct nvme_bdev *bdev;
5440  	struct nvme_ns *nvme_ns;
5441  	struct spdk_bdev_io *bdev_io;
5442  	struct spdk_io_channel *ch;
5443  	struct nvme_bdev_channel *nbdev_ch;
5444  	struct nvme_io_path *io_path;
5445  	struct nvme_ctrlr_channel *ctrlr_ch;
5446  	int rc;
5447  
5448  	/* The test scenario is the following.
5449  	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
5450  	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
5451  	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
5452  	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
5453  	 *   comes first. The queued I/O is failed.
5454  	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
5455  	 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted.
5456  	 */
5457  
5458  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5459  	ut_init_trid(&path.trid);
5460  
5461  	set_thread(0);
5462  
5463  	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
5464  	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
5465  
5466  	g_ut_attach_ctrlr_status = 0;
5467  	g_ut_attach_bdev_count = 1;
5468  
5469  	opts.ctrlr_loss_timeout_sec = 4;
5470  	opts.reconnect_delay_sec = 1;
5471  	opts.fast_io_fail_timeout_sec = 2;
5472  
5473  	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
5474  			      attach_ctrlr_done, NULL, NULL, &opts, false);
5475  	CU_ASSERT(rc == 0);
5476  
5477  	spdk_delay_us(1000);
5478  	poll_threads();
5479  
5480  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5481  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5482  
5483  	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
5484  	CU_ASSERT(nvme_ctrlr != NULL);
5485  
5486  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5487  	CU_ASSERT(bdev != NULL);
5488  
5489  	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
5490  	CU_ASSERT(nvme_ns != NULL);
5491  
5492  	ch = spdk_get_io_channel(bdev);
5493  	SPDK_CU_ASSERT_FATAL(ch != NULL);
5494  
5495  	nbdev_ch = spdk_io_channel_get_ctx(ch);
5496  
5497  	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
5498  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5499  
5500  	ctrlr_ch = io_path->qpair->ctrlr_ch;
5501  	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
5502  	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
5503  
5504  	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
5505  	ut_bdev_io_set_buf(bdev_io);
5506  
5508  	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
5509  	ctrlr->fail_reset = true;
5510  	ctrlr->is_failed = true;
5511  
5512  	rc = bdev_nvme_reset(nvme_ctrlr);
5513  	CU_ASSERT(rc == 0);
5514  	CU_ASSERT(nvme_ctrlr->resetting == true);
5515  	CU_ASSERT(ctrlr->is_failed == true);
5516  
5517  	poll_threads();
5518  
5519  	CU_ASSERT(nvme_ctrlr->resetting == false);
5520  	CU_ASSERT(ctrlr->is_failed == false);
5521  	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5522  	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5523  	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
5524  	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5525  
5526  	/* I/O should be queued. */
5527  	bdev_io->internal.in_submit_request = true;
5528  
5529  	bdev_nvme_submit_request(ch, bdev_io);
5530  
5531  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5532  	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5533  
5534  	/* After a second, the I/O should still be queued and the ctrlr should
5535  	 * still be recovering.
5536  	 */
5537  	spdk_delay_us(SPDK_SEC_TO_USEC);
5538  	poll_threads();
5539  
5540  	CU_ASSERT(bdev_io->internal.in_submit_request == true);
5541  	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
5542  
5543  	CU_ASSERT(nvme_ctrlr->resetting == false);
5544  	CU_ASSERT(ctrlr->is_failed == false);
5545  	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5546  	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5547  	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5548  	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
5551  
5552  	/* After two seconds, fast_io_fail_timeout_sec should expire. */
5553  	spdk_delay_us(SPDK_SEC_TO_USEC);
5554  	poll_threads();
5555  
5556  	CU_ASSERT(nvme_ctrlr->resetting == false);
5557  	CU_ASSERT(ctrlr->is_failed == false);
5558  	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
5559  	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
5560  	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
5561  	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);
5562  
5563  	/* Then within a second, pending I/O should be failed. */
5564  	spdk_delay_us(SPDK_SEC_TO_USEC);
5565  	poll_threads();
5566  
5567  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5568  	poll_threads();
5569  
5570  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5571  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5572  	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
5573  
5574  	/* Another I/O submission should fail immediately. */
5575  	bdev_io->internal.in_submit_request = true;
5576  
5577  	bdev_nvme_submit_request(ch, bdev_io);
5578  
5579  	CU_ASSERT(bdev_io->internal.in_submit_request == false);
5580  	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
5581  
5582  	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr
5583  	 * should be deleted.
5584  	 */
5585  	spdk_delay_us(SPDK_SEC_TO_USEC);
5586  	poll_threads();
5587  
5588  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5589  	poll_threads();
5590  
5591  	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
5592  	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
5593  	CU_ASSERT(nvme_ctrlr->destruct == true);
5594  
5595  	spdk_put_io_channel(ch);
5596  
5597  	poll_threads();
5598  	spdk_delay_us(1000);
5599  	poll_threads();
5600  
5601  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5602  
5603  	free(bdev_io);
5604  }
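
/* A minimal sketch of the queue-or-fail decision verified above (illustrative
 * only): an I/O that finds no usable path is queued for retry while the ctrlr
 * may still recover, and failed immediately once fast_io_fail_timeout_sec has
 * expired.
 */
static bool
ut_should_queue_pathless_io_sketch(const struct nvme_ctrlr *nvme_ctrlr)
{
	return !nvme_ctrlr->fast_io_fail_timedout;
}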
5605  
5606  static void
5607  test_nvme_ns_cmp(void)
5608  {
5609  	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
5610  
5611  	nvme_ns1.id = 0;
5612  	nvme_ns2.id = UINT32_MAX;
5613  
5614  	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
5615  	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
5616  }
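
/* A minimal sketch of the ordering the two assertions above require
 * (illustrative only; the real comparator lives in bdev_nvme.c): returning the
 * sign of a plain subtraction would wrap for id == 0 versus id == UINT32_MAX,
 * so explicit comparisons are needed.
 */
static int
ut_nvme_ns_cmp_sketch(const struct nvme_ns *ns1, const struct nvme_ns *ns2)
{
	if (ns1->id < ns2->id) {
		return -1;
	} else if (ns1->id > ns2->id) {
		return 1;
	}

	return 0;
}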
5617  
5618  static void
5619  test_ana_transition(void)
5620  {
5621  	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
5622  	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
5623  	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
5624  	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };
5625  
5626  	/* case 1: ANA transition timeout is canceled. */
5627  	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5628  	nvme_ns.ana_transition_timedout = true;
5629  
5630  	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5631  
5632  	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5633  
5634  	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
5635  	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5636  
5637  	/* case 2: ANATT timer is kept. */
5638  	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5639  	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
5640  			      &nvme_ns,
5641  			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5642  
5643  	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5644  
5645  	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5646  
5647  	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5648  	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
5649  
5650  	/* case 3: ANATT timer is stopped. */
5651  	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5652  
5653  	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5654  
5655  	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5656  	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
5657  
5658  	/* ANATT timer is started. */
5659  	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
5660  
5661  	_nvme_ns_set_ana_state(&nvme_ns, &desc);
5662  
5663  	CU_ASSERT(nvme_ns.anatt_timer != NULL);
5664  	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);
5665  
5666  	/* ANATT timer is expired. */
5667  	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);
5668  
5669  	poll_threads();
5670  
5671  	CU_ASSERT(nvme_ns.anatt_timer == NULL);
5672  	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
5673  }
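
/* A minimal sketch of the ANATT timer handling the four cases above encode
 * (derived from those assertions; the authoritative code is
 * _nvme_ns_set_ana_state() in bdev_nvme.c): accessible states cancel the timer
 * and clear the timedout flag, while change/inaccessible states arm the timer
 * once and keep it if already armed.
 */
static void
ut_set_ana_state_sketch(struct nvme_ns *nvme_ns,
			const struct spdk_nvme_ana_group_descriptor *desc)
{
	switch (desc->ana_state) {
	case SPDK_NVME_ANA_OPTIMIZED_STATE:
	case SPDK_NVME_ANA_NON_OPTIMIZED_STATE:
		nvme_ns->ana_transition_timedout = false;
		spdk_poller_unregister(&nvme_ns->anatt_timer);
		break;
	case SPDK_NVME_ANA_INACCESSIBLE_STATE:
	case SPDK_NVME_ANA_CHANGE_STATE:
		if (nvme_ns->anatt_timer == NULL) {
			nvme_ns->anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
					       nvme_ns,
					       nvme_ns->ctrlr->ctrlr->cdata.anatt * SPDK_SEC_TO_USEC);
		}
		break;
	default:
		break;
	}

	nvme_ns->ana_state = desc->ana_state;
}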
5674  
5675  static void
5676  _set_preferred_path_cb(void *cb_arg, int rc)
5677  {
5678  	bool *done = cb_arg;
5679  
5680  	*done = true;
5681  }
5682  
5683  static void
5684  test_set_preferred_path(void)
5685  {
5686  	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
5687  	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
5688  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5689  	const int STRING_SIZE = 32;
5690  	const char *attached_names[STRING_SIZE];
5691  	struct nvme_bdev *bdev;
5692  	struct spdk_io_channel *ch;
5693  	struct nvme_bdev_channel *nbdev_ch;
5694  	struct nvme_io_path *io_path;
5695  	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
5696  	const struct spdk_nvme_ctrlr_data *cdata;
5697  	bool done;
5698  	int rc;
5699  
5700  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5701  	ut_init_trid(&path1.trid);
5702  	ut_init_trid2(&path2.trid);
5703  	ut_init_trid3(&path3.trid);
5704  	g_ut_attach_ctrlr_status = 0;
5705  	g_ut_attach_bdev_count = 1;
5706  
5707  	set_thread(0);
5708  
5709  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
5710  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
5711  
5712  	ctrlr1->ns[0].uuid = &uuid1;
5713  
5714  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
5715  			      attach_ctrlr_done, NULL, NULL, NULL, true);
5716  	CU_ASSERT(rc == 0);
5717  
5718  	spdk_delay_us(1000);
5719  	poll_threads();
5720  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5721  	poll_threads();
5722  
5723  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
5724  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
5725  
5726  	ctrlr2->ns[0].uuid = &uuid1;
5727  
5728  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
5729  			      attach_ctrlr_done, NULL, NULL, NULL, true);
5730  	CU_ASSERT(rc == 0);
5731  
5732  	spdk_delay_us(1000);
5733  	poll_threads();
5734  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5735  	poll_threads();
5736  
5737  	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
5738  	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
5739  
5740  	ctrlr3->ns[0].uuid = &uuid1;
5741  
5742  	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
5743  			      attach_ctrlr_done, NULL, NULL, NULL, true);
5744  	CU_ASSERT(rc == 0);
5745  
5746  	spdk_delay_us(1000);
5747  	poll_threads();
5748  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5749  	poll_threads();
5750  
5751  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5752  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5753  
5754  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5755  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
5756  
5757  	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
5758  
5759  	ch = spdk_get_io_channel(bdev);
5760  	SPDK_CU_ASSERT_FATAL(ch != NULL);
5761  	nbdev_ch = spdk_io_channel_get_ctx(ch);
5762  
5763  	io_path = bdev_nvme_find_io_path(nbdev_ch);
5764  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5765  
5766  	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
5767  
5768  	/* If io_path to ctrlr2 is set as the preferred path dynamically, find_io_path()
5769  	 * should return io_path to ctrlr2.
5770  	 */
5771  
5772  	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
5773  	done = false;
5774  
5775  	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5776  
5777  	poll_threads();
5778  	CU_ASSERT(done == true);
5779  
5780  	io_path = bdev_nvme_find_io_path(nbdev_ch);
5781  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5782  
5783  	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
5784  
5785  	/* If io_path to ctrlr3 is set as the preferred path and then a new I/O channel is
5786  	 * acquired, find_io_path() should return io_path to ctrlr3.
5787  	 */
5788  
5789  	spdk_put_io_channel(ch);
5790  
5791  	poll_threads();
5792  
5793  	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
5794  	done = false;
5795  
5796  	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5797  
5798  	poll_threads();
5799  	CU_ASSERT(done == true);
5800  
5801  	ch = spdk_get_io_channel(bdev);
5802  	SPDK_CU_ASSERT_FATAL(ch != NULL);
5803  	nbdev_ch = spdk_io_channel_get_ctx(ch);
5804  
5805  	io_path = bdev_nvme_find_io_path(nbdev_ch);
5806  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5807  
5808  	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);
5809  
5810  	spdk_put_io_channel(ch);
5811  
5812  	poll_threads();
5813  
5814  	rc = bdev_nvme_delete("nvme0", &g_any_path);
5815  	CU_ASSERT(rc == 0);
5816  
5817  	poll_threads();
5818  	spdk_delay_us(1000);
5819  	poll_threads();
5820  
5821  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
5822  }
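
/* A minimal sketch of what "preferred" means for a channel (an assumption-
 * labeled illustration, not the actual bdev_nvme implementation): the matching
 * io_path is moved to the head of the channel's io_path_list and the cached
 * current_io_path is cleared, so the next lookup starts from the preferred
 * path.
 */
static void
ut_prefer_io_path_sketch(struct nvme_bdev_channel *nbdev_ch,
			 struct nvme_io_path *io_path)
{
	STAILQ_REMOVE(&nbdev_ch->io_path_list, io_path, nvme_io_path, stailq);
	STAILQ_INSERT_HEAD(&nbdev_ch->io_path_list, io_path, stailq);
	nbdev_ch->current_io_path = NULL;
}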
5823  
5824  static void
5825  test_find_next_io_path(void)
5826  {
5827  	struct nvme_bdev_channel nbdev_ch = {
5828  		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
5829  		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
5830  	};
5831  	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
5832  	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
5833  	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
5834  	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
5835  	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
5836  	struct nvme_ctrlr_channel ctrlr_ch1 = {};
5837  	struct nvme_ctrlr_channel ctrlr_ch2 = {};
5838  	struct nvme_ctrlr_channel ctrlr_ch3 = {};
5839  	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
5840  	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
5841  	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
5842  	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
5843  	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
5844  	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
5845  	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };
5846  
5847  	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
5848  	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
5849  	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
5850  
5851  	/* nbdev_ch->current_io_path is always set when bdev_nvme_find_next_io_path() is called. */
5852  
5853  	nbdev_ch.current_io_path = &io_path2;
5854  	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5855  	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5856  	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5857  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5858  
5859  	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5860  	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5861  	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5862  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5863  
5864  	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5865  	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
5866  	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5867  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
5868  
5869  	nbdev_ch.current_io_path = &io_path3;
5870  	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
5871  	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5872  	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
5873  	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
5874  }
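
/* A minimal sketch of the active/active selection the assertions above encode,
 * derived from those assertions only (the authoritative logic is in
 * bdev_nvme.c): scan the list starting just after current_io_path, wrapping
 * around once, and return the first optimized path; if none is optimized, fall
 * back to the first non-optimized path seen.
 */
static struct nvme_io_path *
ut_find_next_io_path_sketch(struct nvme_bdev_channel *nbdev_ch)
{
	struct nvme_io_path *start = nbdev_ch->current_io_path;
	struct nvme_io_path *io_path, *non_optimized = NULL;
	bool wrapped = false;

	io_path = STAILQ_NEXT(start, stailq);
	for (;;) {
		if (io_path == NULL) {
			io_path = STAILQ_FIRST(&nbdev_ch->io_path_list);
			wrapped = true;
		}
		if (io_path->nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE) {
			return io_path;
		}
		if (non_optimized == NULL &&
		    io_path->nvme_ns->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE) {
			non_optimized = io_path;
		}
		if (wrapped && io_path == start) {
			break;
		}
		io_path = STAILQ_NEXT(io_path, stailq);
	}

	return non_optimized;
}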
5875  
5876  static void
5877  test_disable_auto_failback(void)
5878  {
5879  	struct nvme_path_id path1 = {}, path2 = {};
5880  	struct nvme_ctrlr_opts opts = {};
5881  	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
5882  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
5883  	struct nvme_ctrlr *nvme_ctrlr1;
5884  	const int STRING_SIZE = 32;
5885  	const char *attached_names[STRING_SIZE];
5886  	struct nvme_bdev *bdev;
5887  	struct spdk_io_channel *ch;
5888  	struct nvme_bdev_channel *nbdev_ch;
5889  	struct nvme_io_path *io_path;
5890  	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
5891  	const struct spdk_nvme_ctrlr_data *cdata;
5892  	bool done;
5893  	int rc;
5894  
5895  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
5896  	ut_init_trid(&path1.trid);
5897  	ut_init_trid2(&path2.trid);
5898  	g_ut_attach_ctrlr_status = 0;
5899  	g_ut_attach_bdev_count = 1;
5900  
5901  	g_opts.disable_auto_failback = true;
5902  
5903  	opts.ctrlr_loss_timeout_sec = -1;
5904  	opts.reconnect_delay_sec = 1;
5905  
5906  	set_thread(0);
5907  
5908  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
5909  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
5910  
5911  	ctrlr1->ns[0].uuid = &uuid1;
5912  
5913  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
5914  			      attach_ctrlr_done, NULL, NULL, &opts, true);
5915  	CU_ASSERT(rc == 0);
5916  
5917  	spdk_delay_us(1000);
5918  	poll_threads();
5919  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5920  	poll_threads();
5921  
5922  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
5923  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
5924  
5925  	ctrlr2->ns[0].uuid = &uuid1;
5926  
5927  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
5928  			      attach_ctrlr_done, NULL, NULL, &opts, true);
5929  	CU_ASSERT(rc == 0);
5930  
5931  	spdk_delay_us(1000);
5932  	poll_threads();
5933  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5934  	poll_threads();
5935  
5936  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
5937  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
5938  
5939  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
5940  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
5941  
5942  	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
5943  	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
5944  
5945  	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */
5946  
5947  	ch = spdk_get_io_channel(bdev);
5948  	SPDK_CU_ASSERT_FATAL(ch != NULL);
5949  	nbdev_ch = spdk_io_channel_get_ctx(ch);
5950  
5951  	io_path = bdev_nvme_find_io_path(nbdev_ch);
5952  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5953  
5954  	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
5955  
5956  	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
5957  	ctrlr1->fail_reset = true;
5958  	ctrlr1->is_failed = true;
5959  
5960  	bdev_nvme_reset(nvme_ctrlr1);
5961  
5962  	poll_threads();
5963  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5964  	poll_threads();
5965  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
5966  	poll_threads();
5967  
5968  	CU_ASSERT(ctrlr1->adminq.is_connected == false);
5969  
5970  	io_path = bdev_nvme_find_io_path(nbdev_ch);
5971  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5972  
5973  	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
5974  
5975  	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
5976  	 * Hence, io_path to ctrlr2 should still be used.
5977  	 */
5978  	ctrlr1->fail_reset = false;
5979  
5980  	spdk_delay_us(SPDK_SEC_TO_USEC);
5981  	poll_threads();
5982  
5983  	CU_ASSERT(ctrlr1->adminq.is_connected == true);
5984  
5985  	io_path = bdev_nvme_find_io_path(nbdev_ch);
5986  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
5987  
5988  	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
5989  
5990  	/* Explicitly set io_path to ctrlr1 as preferred. Then io_path to ctrlr1 should
5991  	 * be used again.
5992  	 */
5993  
5994  	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
5995  	done = false;
5996  
5997  	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);
5998  
5999  	poll_threads();
6000  	CU_ASSERT(done == true);
6001  
6002  	io_path = bdev_nvme_find_io_path(nbdev_ch);
6003  	SPDK_CU_ASSERT_FATAL(io_path != NULL);
6004  
6005  	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);
6006  
6007  	spdk_put_io_channel(ch);
6008  
6009  	poll_threads();
6010  
6011  	rc = bdev_nvme_delete("nvme0", &g_any_path);
6012  	CU_ASSERT(rc == 0);
6013  
6014  	poll_threads();
6015  	spdk_delay_us(1000);
6016  	poll_threads();
6017  
6018  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6019  
6020  	g_opts.disable_auto_failback = false;
6021  }
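
/* A minimal sketch of the caching this test relies on (an assumption-labeled
 * illustration, not the actual lookup in bdev_nvme.c): find_io_path keeps
 * returning the cached current_io_path while it stays usable, and with
 * disable_auto_failback set nothing clears that cache when an earlier path
 * recovers, so I/O stays on ctrlr2 until a preferred path is set explicitly.
 */
static struct nvme_io_path *
ut_cached_io_path_sketch(struct nvme_bdev_channel *nbdev_ch)
{
	struct nvme_io_path *io_path = nbdev_ch->current_io_path;

	if (io_path != NULL && io_path->qpair->qpair != NULL &&
	    io_path->nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE) {
		return io_path;
	}

	/* Otherwise fall back to a fresh scan of io_path_list (not shown). */
	return NULL;
}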
6022  
6023  static void
6024  ut_set_multipath_policy_done(void *cb_arg, int rc)
6025  {
6026  	int *done = cb_arg;
6027  
6028  	SPDK_CU_ASSERT_FATAL(done != NULL);
6029  	*done = rc;
6030  }
6031  
6032  static void
6033  test_set_multipath_policy(void)
6034  {
6035  	struct nvme_path_id path1 = {}, path2 = {};
6036  	struct nvme_ctrlr_opts opts = {};
6037  	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
6038  	struct nvme_bdev_ctrlr *nbdev_ctrlr;
6039  	const int STRING_SIZE = 32;
6040  	const char *attached_names[STRING_SIZE];
6041  	struct nvme_bdev *bdev;
6042  	struct spdk_io_channel *ch;
6043  	struct nvme_bdev_channel *nbdev_ch;
6044  	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
6045  	int done;
6046  	int rc;
6047  
6048  	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
6049  	ut_init_trid(&path1.trid);
6050  	ut_init_trid2(&path2.trid);
6051  	g_ut_attach_ctrlr_status = 0;
6052  	g_ut_attach_bdev_count = 1;
6053  
6054  	g_opts.disable_auto_failback = true;
6055  
6056  	opts.ctrlr_loss_timeout_sec = -1;
6057  	opts.reconnect_delay_sec = 1;
6058  
6059  	set_thread(0);
6060  
6061  	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
6062  	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
6063  
6064  	ctrlr1->ns[0].uuid = &uuid1;
6065  
6066  	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
6067  			      attach_ctrlr_done, NULL, NULL, &opts, true);
6068  	CU_ASSERT(rc == 0);
6069  
6070  	spdk_delay_us(1000);
6071  	poll_threads();
6072  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6073  	poll_threads();
6074  
6075  	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
6076  	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
6077  
6078  	ctrlr2->ns[0].uuid = &uuid1;
6079  
6080  	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
6081  			      attach_ctrlr_done, NULL, NULL, &opts, true);
6082  	CU_ASSERT(rc == 0);
6083  
6084  	spdk_delay_us(1000);
6085  	poll_threads();
6086  	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
6087  	poll_threads();
6088  
6089  	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
6090  	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
6091  
6092  	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
6093  	SPDK_CU_ASSERT_FATAL(bdev != NULL);
6094  
6095  	/* If the multipath policy is updated before any I/O channel is acquired,
6096  	 * a newly acquired I/O channel should have the update.
6097  	 */
6098  	done = -1;
6099  	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
6100  				       ut_set_multipath_policy_done, &done);
6101  	poll_threads();
6102  	CU_ASSERT(done == 0);
6103  
6104  	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6105  
6106  	ch = spdk_get_io_channel(bdev);
6107  	SPDK_CU_ASSERT_FATAL(ch != NULL);
6108  	nbdev_ch = spdk_io_channel_get_ctx(ch);
6109  
6110  	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
6111  
6112  	/* If the multipath policy is updated while an I/O channel is active,
6113  	 * the update should be applied to the I/O channel immediately.
6114  	 */
6115  	done = -1;
6116  	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
6117  				       ut_set_multipath_policy_done, &done);
6118  	poll_threads();
6119  	CU_ASSERT(done == 0);
6120  
6121  	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6122  	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
6123  
6124  	spdk_put_io_channel(ch);
6125  
6126  	poll_threads();
6127  
6128  	rc = bdev_nvme_delete("nvme0", &g_any_path);
6129  	CU_ASSERT(rc == 0);
6130  
6131  	poll_threads();
6132  	spdk_delay_us(1000);
6133  	poll_threads();
6134  
6135  	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
6136  }
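
/* A minimal sketch of how a policy update reaches both existing and future
 * channels (an assumption-labeled illustration, not the actual
 * implementation): the policy is stored on the nvme_bdev, which new channels
 * copy on creation, and is pushed to every live channel via
 * spdk_for_each_channel().
 */
static void
ut_update_ch_policy_sketch(struct spdk_io_channel_iter *i)
{
	struct nvme_bdev *nbdev = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);

	nbdev_ch->mp_policy = nbdev->mp_policy;
	spdk_for_each_channel_continue(i, 0);
}

static void
ut_set_multipath_policy_sketch(struct nvme_bdev *nbdev,
			       enum bdev_nvme_multipath_policy mp_policy)
{
	nbdev->mp_policy = mp_policy;
	spdk_for_each_channel(nbdev, ut_update_ch_policy_sketch, nbdev, NULL);
}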
6137  
6138  int
6139  main(int argc, const char **argv)
6140  {
6141  	CU_pSuite	suite = NULL;
6142  	unsigned int	num_failures;
6143  
6144  	CU_set_error_action(CUEA_ABORT);
6145  	CU_initialize_registry();
6146  
6147  	suite = CU_add_suite("nvme", NULL, NULL);
6148  
6149  	CU_ADD_TEST(suite, test_create_ctrlr);
6150  	CU_ADD_TEST(suite, test_reset_ctrlr);
6151  	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
6152  	CU_ADD_TEST(suite, test_failover_ctrlr);
6153  	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
6154  	CU_ADD_TEST(suite, test_pending_reset);
6155  	CU_ADD_TEST(suite, test_attach_ctrlr);
6156  	CU_ADD_TEST(suite, test_aer_cb);
6157  	CU_ADD_TEST(suite, test_submit_nvme_cmd);
6158  	CU_ADD_TEST(suite, test_add_remove_trid);
6159  	CU_ADD_TEST(suite, test_abort);
6160  	CU_ADD_TEST(suite, test_get_io_qpair);
6161  	CU_ADD_TEST(suite, test_bdev_unregister);
6162  	CU_ADD_TEST(suite, test_compare_ns);
6163  	CU_ADD_TEST(suite, test_init_ana_log_page);
6164  	CU_ADD_TEST(suite, test_get_memory_domains);
6165  	CU_ADD_TEST(suite, test_reconnect_qpair);
6166  	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
6167  	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
6168  	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
6169  	CU_ADD_TEST(suite, test_admin_path);
6170  	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
6171  	CU_ADD_TEST(suite, test_find_io_path);
6172  	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
6173  	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
6174  	CU_ADD_TEST(suite, test_retry_io_count);
6175  	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
6176  	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
6177  	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
6178  	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
6179  	CU_ADD_TEST(suite, test_reconnect_ctrlr);
6180  	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
6181  	CU_ADD_TEST(suite, test_fail_path);
6182  	CU_ADD_TEST(suite, test_nvme_ns_cmp);
6183  	CU_ADD_TEST(suite, test_ana_transition);
6184  	CU_ADD_TEST(suite, test_set_preferred_path);
6185  	CU_ADD_TEST(suite, test_find_next_io_path);
6186  	CU_ADD_TEST(suite, test_disable_auto_failback);
6187  	CU_ADD_TEST(suite, test_set_multipath_policy);
6188  
6189  	CU_basic_set_mode(CU_BRM_VERBOSE);
6190  
6191  	allocate_threads(3);
6192  	set_thread(0);
6193  	bdev_nvme_library_init();
6194  	init_accel();
6195  
6196  	CU_basic_run_tests();
6197  
6198  	set_thread(0);
6199  	bdev_nvme_library_fini();
6200  	fini_accel();
6201  	free_threads();
6202  
6203  	num_failures = CU_get_number_of_failures();
6204  	CU_cleanup_registry();
6205  
6206  	return num_failures;
6207  }
6208