/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation. All rights reserved.
 *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "spdk/log.h"

#include "common/lib/test_env.c"

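/*
 * The implementation files under test are included directly (rather than
 * linked) so the tests can reach static functions and internal state.
 */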
#include "nvme/nvme_ctrlr.c"
#include "nvme/nvme_quirks.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

pid_t g_spdk_nvme_pid;

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;

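/*
 * Fake register file backing the register accessor stubs below. Tests drive
 * the controller state machine by setting CC/CSTS bits here directly;
 * g_set_reg_cb, when non-NULL, is invoked on every register write.
 */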
struct spdk_nvme_registers g_ut_nvme_regs = {};
typedef void (*set_reg_cb)(void);
set_reg_cb g_set_reg_cb;

__thread int    nvme_thread_ioq_index = -1;

uint32_t set_size = 1;

int set_status_cpl = -1;

#define UT_HOSTID "e53e9258-c93b-48b5-be1a-f025af6d232a"

DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
	    (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_set_id_desc_list_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_free_iocs_specific_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_qpair_abort_all_queued_reqs, (struct spdk_nvme_qpair *qpair));
DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
		struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_io_msg_ctrlr_update, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB(nvme_io_msg_process, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(nvme_transport_ctrlr_reserve_cmb, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_receive, int, (struct spdk_nvme_ctrlr *ctrlr,
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_send, int, (struct spdk_nvme_ctrlr *ctrlr,
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_qpair_abort_queued_reqs, (struct spdk_nvme_qpair *qpair));
DEFINE_STUB(spdk_nvme_qpair_authenticate, int, (struct spdk_nvme_qpair *qpair,
		spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);

int
nvme_get_default_hostnqn(char *buf, int len)
{
	const char *nqn = "nqn.2014-08.org.nvmexpress:uuid:" UT_HOSTID;

	SPDK_CU_ASSERT_FATAL(len >= (int)strlen(nqn));
	memcpy(buf, nqn, strlen(nqn));

	return 0;
}

DEFINE_RETURN_MOCK(nvme_transport_ctrlr_get_memory_domains, int);
int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	HANDLE_RETURN_MOCK(nvme_transport_ctrlr_get_memory_domains);

	return 0;
}

DEFINE_RETURN_MOCK(nvme_transport_ctrlr_ready, int);
int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	HANDLE_RETURN_MOCK(nvme_transport_ctrlr_ready);
	return 0;
}

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	return NULL;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct_finish(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

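/*
 * Register accessors simply read from or write to the fake register file,
 * after asserting that the offset lies within struct spdk_nvme_registers.
 */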
int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	if (g_set_reg_cb) {
		g_set_reg_cb();
	}
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	if (g_set_reg_cb) {
		g_set_reg_cb();
	}
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

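/*
 * The async register accessors complete inline: they perform the access and
 * immediately invoke the callback with a generic SUCCESS completion, so
 * register-driven state transitions finish within the calling function.
 */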
int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_set_reg_4(ctrlr, offset, value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_set_reg_8(ctrlr, offset, value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};
	uint32_t value;

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_get_reg_4(ctrlr, offset, &value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};
	uint64_t value;

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_get_reg_8(ctrlr, offset, &value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return UINT32_MAX;
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	return 1;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	qpair->ctrlr = ctrlr;
	qpair->id = qid;
	qpair->qprio = opts->qprio;

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);
}

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
}

int
nvme_driver_init(void)
{
	return 0;
}

int
nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr,
		enum spdk_nvme_qprio qprio,
		uint32_t num_requests, bool async)
{
	qpair->id = id;
	qpair->qprio = qprio;
	qpair->ctrlr = ctrlr;
	qpair->async = async;

	return 0;
}

static struct spdk_nvme_cpl fake_cpl = {};
static enum spdk_nvme_generic_command_status_code set_status_code = SPDK_NVME_SC_SUCCESS;

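/* Complete an admin command inline using the globally selected status code. */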
static void
fake_cpl_sc(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl.status.sc = set_status_code;
	cb_fn(cb_arg, &fake_cpl);
}

static uint32_t g_ut_cdw11;

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	g_ut_cdw11 = cdw11;
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

struct spdk_nvme_ana_page *g_ana_hdr;
struct spdk_nvme_ana_group_descriptor **g_ana_descs;

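/*
 * Fabricate log page payloads: an ANA log built from g_ana_hdr/g_ana_descs
 * when configured, or an Intel log page directory advertising all of the
 * Intel-specific pages.
 */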
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) && g_ana_hdr) {
		uint32_t i;
		uint8_t *ptr = payload;

		memset(payload, 0, payload_size);
		memcpy(ptr, g_ana_hdr, sizeof(*g_ana_hdr));
		ptr += sizeof(*g_ana_hdr);
		for (i = 0; i < g_ana_hdr->num_ana_group_desc; ++i) {
			uint32_t desc_size = sizeof(**g_ana_descs) +
					     g_ana_descs[i]->num_of_nsid * sizeof(uint32_t);
			memcpy(ptr, g_ana_descs[i], desc_size);
			ptr += desc_size;
		}
	} else if (log_page == SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY) {
		struct spdk_nvme_intel_log_page_directory *log_page_directory = payload;
		log_page_directory->read_latency_log_len = true;
		log_page_directory->write_latency_log_len = true;
		log_page_directory->temperature_statistics_log_len = true;
		log_page_directory->smart_log_len = true;
		log_page_directory->marketing_description_log_len = true;
	}

	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10, uint32_t cdw11,
				     uint32_t cdw14, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);
	STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);

	/*
	 * For the purposes of this unit test, we don't need to bother emulating request submission.
	 */

	return 0;
}

static int32_t g_wait_for_completion_return_val;

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return g_wait_for_completion_return_val;
}

void
nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
{
}
void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status	*status = arg;
	/* This should not happen in the test env, since this callback is always
	 * invoked before wait_for_completion_*, and timed_out can only be set to
	 * true inside the wait_for_completion_* functions. */
	CU_ASSERT(status->timed_out == false);

	status->cpl = *cpl;
	status->done = true;
}

static struct nvme_completion_poll_status *g_failed_status;

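/*
 * Synchronous stand-in for completion polling: mark the status done right
 * away, optionally injecting an error status via set_status_cpl, and record
 * the status object in g_failed_status when the qpair reports a failure.
 */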
int
nvme_wait_for_completion_robust_lock_timeout(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex,
	uint64_t timeout_in_usecs)
{
	if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
		g_failed_status = status;
		status->timed_out = true;
		return -1;
	}

	status->done = true;
	if (set_status_cpl == 1) {
		status->cpl.status.sc = 1;
	}
	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
}

int
nvme_wait_for_completion_robust_lock(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, robust_mutex, 0);
}

int
nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			 struct nvme_completion_poll_status *status)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, 0);
}

int
nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				 struct nvme_completion_poll_status *status,
				 uint64_t timeout_in_usecs)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, timeout_in_usecs);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

static uint32_t *g_active_ns_list = NULL;
static uint32_t g_active_ns_list_length = 0;
static struct spdk_nvme_ctrlr_data *g_cdata = NULL;
static bool g_fail_next_identify = false;

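/*
 * Emulate IDENTIFY. For the active namespace list, report either all
 * namespaces from 1 through cdata.nn or the test-provided g_active_ns_list,
 * in both cases returning only namespaces greater than the nsid cursor, as
 * the real command does. g_fail_next_identify forces a one-shot failure.
 */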
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (g_fail_next_identify) {
		g_fail_next_identify = false;
		return 1;
	}

	memset(payload, 0, payload_size);
	if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
		uint32_t count = 0;
		uint32_t i = 0;
		struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;

		if (g_active_ns_list == NULL) {
			for (i = 1; i <= ctrlr->cdata.nn; i++) {
				if (i <= nsid) {
					continue;
				}

				ns_list->ns_list[count++] = i;
				if (count == SPDK_COUNTOF(ns_list->ns_list)) {
					break;
				}
			}
		} else {
			for (i = 0; i < g_active_ns_list_length; i++) {
				uint32_t cur_nsid = g_active_ns_list[i];
				if (cur_nsid <= nsid) {
					continue;
				}

				ns_list->ns_list[count++] = cur_nsid;
				if (count == SPDK_COUNTOF(ns_list->ns_list)) {
					break;
				}
			}
		}
	} else if (cns == SPDK_NVME_IDENTIFY_CTRLR) {
		if (g_cdata) {
			memcpy(payload, g_cdata, sizeof(*g_cdata));
		}
	} else if (cns == SPDK_NVME_IDENTIFY_NS_IOCS) {
		return 0;
	}

	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				   uint32_t doper, uint32_t dtype, uint32_t dspec,
				   void *payload, uint32_t payload_size, uint32_t cdw12,
				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				      uint32_t doper, uint32_t dtype, uint32_t dspec,
				      void *payload, uint32_t payload_size, uint32_t cdw12,
				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

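/*
 * The firmware commit stub expects the replace-image action and uses
 * set_status_cpl to make the subsequent completion poll report an error,
 * unless the controller is resetting.
 */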
int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
	if (fw_commit->fs == 0) {
		return -1;
	}
	set_status_cpl = 1;
	if (ctrlr->is_resetting == true) {
		set_status_cpl = 0;
	}
	return 0;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
		return -1;
	}
	CU_ASSERT(offset == 0);
	return 0;
}

bool
nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns)
{
	switch (ns->csi) {
	case SPDK_NVME_CSI_NVM:
		/*
		 * NVM Command Set Specific Identify Namespace data structure
		 * is currently all-zeroes, reserved for future use.
		 */
		return false;
	case SPDK_NVME_CSI_ZNS:
		return true;
	default:
		SPDK_WARNLOG("Unsupported CSI: %u for NSID: %u\n", ns->csi, ns->id);
		return false;
	}
}

void
nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns)
{
	if (!ns->id) {
		return;
	}

	if (ns->nsdata_zns) {
		spdk_free(ns->nsdata_zns);
		ns->nsdata_zns = NULL;
	}
}

void
nvme_ns_free_nvm_specific_data(struct spdk_nvme_ns *ns)
{
	if (!ns->id) {
		return;
	}

	if (ns->nsdata_nvm) {
		spdk_free(ns->nsdata_nvm);
		ns->nsdata_nvm = NULL;
	}
}

void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

void
spdk_pci_device_detach(struct spdk_pci_device *device)
{
}

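/*
 * Declare a stack-allocated controller with a minimal admin queue (a single
 * free request) and a custom transport type, so that the transport stubs
 * above are exercised instead of a real transport.
 */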
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;					\
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;

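/*
 * The init tests below single-step nvme_ctrlr_process_init() through the
 * CC.EN/CSTS.RDY state machine, asserting the expected controller state and
 * register contents after each step.
 */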
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_EN_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_SET_EN_0) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Default round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = 0x0;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to default round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Weighted round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to weighted round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Vendor specific arbitration mechanism supported
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to vendor specific arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 1
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

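/*
 * Place a constructed controller directly into the READY state with
 * num_io_queues free queue IDs; qid 0 stays cleared since it is reserved
 * for the admin queue.
 */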
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
	uint32_t i;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);

	ctrlr->page_size = 0x1000;
	ctrlr->opts.num_io_queues = num_io_queues;
	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	ctrlr->state = NVME_CTRLR_STATE_READY;
	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);

	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
}

static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct(ctrlr);
}

static void
test_alloc_io_qpair_rr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0;

	setup_qpairs(&ctrlr, 1);

	/*
	 * Simulate a controller that uses the default round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	/* Only 1 I/O qpair was allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpair has been returned to the free list,
	 * we should be able to allocate it again.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/* Only qprio 0 is acceptable with the default round robin arbitration mechanism */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 3;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
	opts.qprio = 0;

	/* IO qpair can only be created when ctrlr is in READY state */
	ctrlr.state = NVME_CTRLR_STATE_ENABLE;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);
	ctrlr.state = NVME_CTRLR_STATE_READY;

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1;

	setup_qpairs(&ctrlr, 2);

	/*
	 * Simulate a controller that uses the weighted round robin
	 * arbitration mechanism.
	 */
	ctrlr.process_init_cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	/*
	 * Allocate 2 qpairs and free them
	 */
	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Allocate 2 qpairs and free them in the reverse order
	 */
	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 2);

	opts.qprio = 3;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_2(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1, *q2, *q3;

	setup_qpairs(&ctrlr, 4);

	/*
	 * Simulate a controller that uses the weighted round robin
	 * arbitration mechanism.
	 */
	ctrlr.process_init_cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);

	opts.qprio = 2;
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 2);

	opts.qprio = 3;
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/* Only 4 I/O qpairs were allocated, so this should fail */
	opts.qprio = 0;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpairs have been returned to the free list,
	 * we should be able to allocate them again.
	 *
	 * Allocate 4 I/O qpairs, with two pairs sharing the same qprio.
	 */
1748 	opts.qprio = 1;
1749 	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1750 	SPDK_CU_ASSERT_FATAL(q0 != NULL);
1751 	SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
1752 
1753 	opts.qprio = 1;
1754 	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1755 	SPDK_CU_ASSERT_FATAL(q1 != NULL);
1756 	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
1757 
1758 	opts.qprio = 3;
1759 	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1760 	SPDK_CU_ASSERT_FATAL(q2 != NULL);
1761 	SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
1762 
1763 	opts.qprio = 3;
1764 	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1765 	SPDK_CU_ASSERT_FATAL(q3 != NULL);
1766 	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
1767 
1768 	/*
1769 	 * Free all I/O qpairs in allocation order
1770 	 */
1771 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
1772 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
1773 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
1774 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
1775 
1776 	cleanup_qpairs(&ctrlr);
1777 }
1778 
1779 bool g_connect_qpair_called = false;
1780 int g_connect_qpair_return_code = 0;
1781 int
1782 nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1783 {
1784 	g_connect_qpair_called = true;
1785 	qpair->state = NVME_QPAIR_CONNECTED;
1786 	return g_connect_qpair_return_code;
1787 }
1788 
1789 static void
1790 test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
1791 {
1792 	struct spdk_nvme_ctrlr	ctrlr = {};
1793 	struct spdk_nvme_qpair	qpair = {};
1794 	int rc;
1795 
1796 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
1797 
1798 	/* Various states of controller disconnect. */
1799 	qpair.id = 1;
1800 	qpair.ctrlr = &ctrlr;
1801 	ctrlr.is_removed = 1;
1802 	ctrlr.is_failed = 0;
1803 	ctrlr.is_resetting = 0;
1804 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1805 	CU_ASSERT(rc == -ENODEV);
1806 
1807 	ctrlr.is_removed = 0;
1808 	ctrlr.is_failed = 1;
1809 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1810 	CU_ASSERT(rc == -ENXIO);
1811 
1812 	ctrlr.is_failed = 0;
1813 	ctrlr.is_resetting = 1;
1814 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1815 	CU_ASSERT(rc == -EAGAIN);
1816 
1817 	/* Confirm precedence for controller states: removed > resetting > failed */
1818 	ctrlr.is_removed = 1;
1819 	ctrlr.is_failed = 1;
1820 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1821 	CU_ASSERT(rc == -ENODEV);
1822 
1823 	ctrlr.is_removed = 0;
1824 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1825 	CU_ASSERT(rc == -EAGAIN);
1826 
1827 	ctrlr.is_resetting = 0;
1828 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1829 	CU_ASSERT(rc == -ENXIO);
1830 
1831 	/* qpair is not failed, so make sure we don't call down to the transport */
1832 	ctrlr.is_failed = 0;
1833 	qpair.state = NVME_QPAIR_CONNECTED;
1834 	g_connect_qpair_called = false;
1835 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1836 	CU_ASSERT(g_connect_qpair_called == false);
1837 	CU_ASSERT(rc == 0);
1838 
1839 	/* Transport qpair is failed, so make sure we call down to the transport */
1840 	qpair.state = NVME_QPAIR_DISCONNECTED;
1841 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1842 	CU_ASSERT(g_connect_qpair_called == true);
1843 	CU_ASSERT(rc == 0);
1844 
1845 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
1846 }
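
/*
 * The assertions above pin down the error precedence when reconnecting:
 * is_removed is checked first (-ENODEV), then is_resetting (-EAGAIN),
 * then is_failed (-ENXIO).  A sketch of that ordering, derived from the
 * assertions rather than copied from the library:
 *
 *	if (ctrlr->is_removed)   { return -ENODEV; }
 *	if (ctrlr->is_resetting) { return -EAGAIN; }
 *	if (ctrlr->is_failed)    { return -ENXIO; }
 */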
1847 
1848 static void
1849 test_nvme_ctrlr_fail(void)
1850 {
1851 	struct spdk_nvme_ctrlr	ctrlr = {};
1852 
1853 	ctrlr.opts.num_io_queues = 0;
1854 	nvme_ctrlr_fail(&ctrlr, false);
1855 
1856 	CU_ASSERT(ctrlr.is_failed == true);
1857 }
1858 
1859 static void
1860 test_nvme_ctrlr_construct_intel_support_log_page_list(void)
1861 {
1862 	bool	res;
1863 	struct spdk_nvme_ctrlr				ctrlr = {};
1864 	struct spdk_nvme_intel_log_page_directory	payload = {};
1865 	struct spdk_pci_id				pci_id = {};
1866 
1867 	/* Get quirks for a device with all 0 vendor/device id */
1868 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1869 	CU_ASSERT(ctrlr.quirks == 0);
1870 
1871 	/* Set the vendor to Intel, but provide no device id */
1872 	pci_id.class_id = SPDK_PCI_CLASS_NVME;
1873 	ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
1874 	payload.temperature_statistics_log_len = 1;
1875 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1876 	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
1877 
1878 	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
1879 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
1880 	CU_ASSERT(res == true);
1881 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
1882 	CU_ASSERT(res == true);
1883 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
1884 	CU_ASSERT(res == false);
1885 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
1886 	CU_ASSERT(res == false);
1887 
1888 	/* Set a valid vendor ID, device ID and subdevice ID */
1889 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
1890 	payload.temperature_statistics_log_len = 0;
1891 	pci_id.vendor_id = SPDK_PCI_VID_INTEL;
1892 	pci_id.device_id = 0x0953;
1893 	pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
1894 	pci_id.subdevice_id = 0x3702;
1895 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1896 	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
1897 
1898 	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
1899 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
1900 	CU_ASSERT(res == true);
1901 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
1902 	CU_ASSERT(res == false);
1903 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
1904 	CU_ASSERT(res == true);
1905 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
1906 	CU_ASSERT(res == false);
1907 }
1908 
1909 static void
1910 test_nvme_ctrlr_set_supported_features(void)
1911 {
1912 	bool	res;
1913 	struct spdk_nvme_ctrlr			ctrlr = {};
1914 
1915 	/* Set an invalid vendor ID */
1916 	ctrlr.cdata.vid = 0xFFFF;
1917 	nvme_ctrlr_set_supported_features(&ctrlr);
1918 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
1919 	CU_ASSERT(res == true);
1920 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
1921 	CU_ASSERT(res == false);
1922 
1923 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
1924 	nvme_ctrlr_set_supported_features(&ctrlr);
1925 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
1926 	CU_ASSERT(res == true);
1927 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
1928 	CU_ASSERT(res == true);
1929 }
1930 
1931 static void
1932 test_nvme_ctrlr_set_host_feature(void)
1933 {
1934 	DECLARE_AND_CONSTRUCT_CTRLR();
1935 
1936 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
1937 
1938 	ctrlr.cdata.ctratt.bits.elbas = 0;
1939 	ctrlr.state = NVME_CTRLR_STATE_SET_HOST_FEATURE;
1940 
1941 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1942 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_DB_BUF_CFG);
1943 
1944 	ctrlr.cdata.ctratt.bits.elbas = 1;
1945 	ctrlr.state = NVME_CTRLR_STATE_SET_HOST_FEATURE;
1946 
1947 	while (ctrlr.state != NVME_CTRLR_STATE_SET_DB_BUF_CFG) {
1948 		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1949 	}
1950 
1951 	CU_ASSERT(ctrlr.tmp_ptr == NULL);
1952 	CU_ASSERT(ctrlr.feature_supported[SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT] == true);
1953 
1954 	nvme_ctrlr_destruct(&ctrlr);
1955 }
1956 
1957 static void
1958 test_ctrlr_get_default_ctrlr_opts(void)
1959 {
1960 	struct spdk_nvme_ctrlr_opts opts = {};
1961 
1962 	CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id, UT_HOSTID) == 0);
1963 
1964 	memset(&opts, 0, sizeof(opts));
1965 
1966 	/* set a smaller opts_size */
1967 	CU_ASSERT(sizeof(opts) > 8);
1968 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
1969 	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
1970 	CU_ASSERT_FALSE(opts.use_cmb_sqs);
1971 	/* Check that the fields below are not initialized to their default values */
1972 	CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
1973 	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
1974 	CU_ASSERT_EQUAL(opts.io_queue_size, 0);
1975 	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
1976 	for (int i = 0; i < 8; i++) {
1977 		CU_ASSERT(opts.host_id[i] == 0);
1978 	}
1979 	for (int i = 0; i < 16; i++) {
1980 		CU_ASSERT(opts.extended_host_id[i] == 0);
1981 	}
1982 	CU_ASSERT(strlen(opts.hostnqn) == 0);
1983 	CU_ASSERT(strlen(opts.src_addr) == 0);
1984 	CU_ASSERT(strlen(opts.src_svcid) == 0);
1985 	CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);
1986 
1987 	/* Pass the full opts_size, consistent with sizeof(opts) */
1988 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
1989 	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
1990 	CU_ASSERT_FALSE(opts.use_cmb_sqs);
1991 	CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
1992 	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
1993 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
1994 	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
1995 	for (int i = 0; i < 8; i++) {
1996 		CU_ASSERT(opts.host_id[i] == 0);
1997 	}
1998 	CU_ASSERT_STRING_EQUAL(opts.hostnqn,
1999 			       "nqn.2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
2000 	CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
2001 			 sizeof(opts.extended_host_id)) == 0);
2002 	CU_ASSERT(strlen(opts.src_addr) == 0);
2003 	CU_ASSERT(strlen(opts.src_svcid) == 0);
2004 	CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
2005 }
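
/*
 * The truncated-opts_size behavior verified above is commonly implemented
 * with an offsetof() guard: a field is written only if it lies entirely
 * within the caller-provided size.  A minimal sketch of the pattern (the
 * UT_FIELD_OK name is illustrative):
 *
 *	#define UT_FIELD_OK(field) \
 *		(offsetof(struct spdk_nvme_ctrlr_opts, field) + \
 *		 sizeof(opts->field) <= opts_size)
 *
 *	if (UT_FIELD_OK(num_io_queues)) {
 *		opts->num_io_queues = DEFAULT_MAX_IO_QUEUES;
 *	}
 *
 * With opts_size = 8 only the leading fields qualify, which is why
 * keep_alive_timeout_ms and friends stay zeroed in the first half of the
 * test.
 */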
2006 
2007 static void
2008 test_ctrlr_get_default_io_qpair_opts(void)
2009 {
2010 	struct spdk_nvme_ctrlr ctrlr = {};
2011 	struct spdk_nvme_io_qpair_opts opts = {};
2012 
2013 	memset(&opts, 0, sizeof(opts));
2014 
2015 	/* set a smaller opts_size */
2016 	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
2017 	CU_ASSERT(sizeof(opts) > 8);
2018 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
2019 	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
2020 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
2021 	/* Check that the field below is not initialized to its default value */
2022 	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
2023 
2024 	/* Pass the full opts_size, consistent with sizeof(opts) */
2025 	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
2026 	ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
2027 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
2028 	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
2029 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
2030 	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
2031 }
2032 
2033 #if 0 /* TODO: move to PCIe-specific unit test */
2034 static void
2035 test_nvme_ctrlr_alloc_cmb(void)
2036 {
2037 	int			rc;
2038 	uint64_t		offset;
2039 	struct spdk_nvme_ctrlr	ctrlr = {};
2040 
2041 	ctrlr.cmb_size = 0x1000000;
2042 	ctrlr.cmb_current_offset = 0x100;
2043 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
2044 	CU_ASSERT(rc == 0);
2045 	CU_ASSERT(offset == 0x1000);
2046 	CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);
2047 
2048 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
2049 	CU_ASSERT(rc == 0);
2050 	CU_ASSERT(offset == 0x2000);
2051 	CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);
2052 
2053 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
2054 	CU_ASSERT(rc == 0);
2055 	CU_ASSERT(offset == 0x100000);
2056 	CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);
2057 
2058 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
2059 	CU_ASSERT(rc == -1);
2060 }
2061 #endif
2062 
2063 static void
2064 test_spdk_nvme_ctrlr_update_firmware(void)
2065 {
2066 	struct spdk_nvme_ctrlr ctrlr = {};
2067 	void *payload = NULL;
2068 	int point_payload = 1;
2069 	int slot = 0;
2070 	int ret = 0;
2071 	struct spdk_nvme_status status;
2072 	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;
2073 
2074 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
2075 
2076 	/* An invalid size (not a multiple of 4 bytes) should be rejected */
2077 	set_size = 5;
2078 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2079 	CU_ASSERT(ret == -1);
2080 
2081 	/* A NULL payload should fail even when set_size < min_page_size */
2082 	set_size = 4;
2083 	ctrlr.min_page_size = 5;
2084 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2085 	CU_ASSERT(ret == -1);
2086 
2087 	/* A non-NULL payload with a min_page_size of 0 should fail */
2088 	set_size = 4;
2089 	ctrlr.min_page_size = 0;
2090 	payload = &point_payload;
2091 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2092 	CU_ASSERT(ret == -1);
2093 
2094 	/* Firmware image download with a valid payload and min_page_size, but the completion status (set_status_cpl = 1) indicates failure */
2095 	set_status_cpl = 1;
2096 	set_size = 4;
2097 	ctrlr.min_page_size = 5;
2098 	payload = &point_payload;
2099 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2100 	CU_ASSERT(ret == -ENXIO);
2101 
2102 	/* Firmware image download with the completion status (set_status_cpl) set to 0 */
2103 	set_status_cpl = 0;
2104 	set_size = 4;
2105 	ctrlr.min_page_size = 5;
2106 	payload = &point_payload;
2107 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2108 	CU_ASSERT(ret == -1);
2109 
2110 	/* Check firmware commit */
2111 	ctrlr.is_resetting = false;
2112 	set_status_cpl = 0;
2113 	slot = 1;
2114 	set_size = 4;
2115 	ctrlr.min_page_size = 5;
2116 	payload = &point_payload;
2117 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2118 	CU_ASSERT(ret == -ENXIO);
2119 
2120 	/* Firmware download and commit succeed (ctrlr is resetting after the commit) */
2121 	ctrlr.is_resetting = true;
2122 	set_status_cpl = 0;
2123 	slot = 1;
2124 	set_size = 4;
2125 	ctrlr.min_page_size = 5;
2126 	payload = &point_payload;
2127 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2128 	CU_ASSERT(ret == 0);
2129 
2130 	/* nvme_wait_for_completion returns an error */
2131 	g_wait_for_completion_return_val = -1;
2132 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2133 	CU_ASSERT(ret == -ENXIO);
2134 	CU_ASSERT(g_failed_status != NULL);
2135 	CU_ASSERT(g_failed_status->timed_out == true);
2136 	/* The status should be freed by the callback, which is not triggered in the test env.
2137 	   Store the status in a global variable and free it manually.
2138 	   If spdk_nvme_ctrlr_update_firmware changes its behavior and frees the status
2139 	   itself, we'll get a double free here. */
2140 	free(g_failed_status);
2141 	g_failed_status = NULL;
2142 	g_wait_for_completion_return_val = 0;
2143 
2144 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
2145 	set_status_cpl = 0;
2146 }
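
/*
 * For reference, the download path exercised above first validates that
 * the image size is dword-granular and then transfers it in min_page_size
 * chunks.  A sketch of that loop, under those assumptions (not the
 * verbatim library code):
 *
 *	if (size % 4 != 0) {
 *		return -1;
 *	}
 *	while (remaining > 0) {
 *		transfer = remaining < ctrlr->min_page_size ?
 *			   remaining : ctrlr->min_page_size;
 *		... issue a firmware image download for
 *		    [offset, offset + transfer) and wait for completion ...
 *		offset += transfer;
 *		remaining -= transfer;
 *	}
 */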
2147 
2148 int
2149 nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
2150 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
2151 {
2152 	fake_cpl_sc(cb_fn, cb_arg);
2153 	return 0;
2154 }
2155 
2156 static void
2157 test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
2158 {
2159 	struct spdk_nvme_ctrlr ctrlr = {};
2160 	int ret = -1;
2161 
2162 	ctrlr.cdata.oacs.doorbell_buffer_config = 1;
2163 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2164 	ctrlr.page_size = 0x1000;
2165 	MOCK_CLEAR(spdk_malloc);
2166 	MOCK_CLEAR(spdk_zmalloc);
2167 	ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
2168 	CU_ASSERT(ret == 0);
2169 	nvme_ctrlr_free_doorbell_buffer(&ctrlr);
2170 }
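
/*
 * Background for the setup above: the Doorbell Buffer Config command hands
 * the controller two page-sized, page-aligned host buffers - PRP1 points at
 * the shadow doorbell buffer and PRP2 at the EventIdx buffer.  A sketch of
 * the allocation this relies on (illustrative, not the library code):
 *
 *	shadow = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size, NULL,
 *			      SPDK_ENV_SOCKET_ID_ANY,
 *			      SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
 *	eventidx = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size, NULL,
 *				SPDK_ENV_SOCKET_ID_ANY,
 *				SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
 */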
2171 
2172 static void
2173 test_nvme_ctrlr_test_active_ns(void)
2174 {
2175 	uint32_t		nsid, minor;
2176 	size_t			ns_id_count;
2177 	struct spdk_nvme_ctrlr	ctrlr = {};
2178 	uint32_t		active_ns_list[1531];
2179 
2180 	for (nsid = 1; nsid <= 1531; nsid++) {
2181 		active_ns_list[nsid - 1] = nsid;
2182 	}
2183 
2184 	g_active_ns_list = active_ns_list;
2185 
2186 	ctrlr.page_size = 0x1000;
2187 
2188 	for (minor = 0; minor <= 2; minor++) {
2189 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2190 		ctrlr.state = NVME_CTRLR_STATE_READY;
2191 
2192 		ctrlr.vs.bits.mjr = 1;
2193 		ctrlr.vs.bits.mnr = minor;
2194 		ctrlr.vs.bits.ter = 0;
2195 		ctrlr.cdata.nn = 1531;
2196 
2197 		RB_INIT(&ctrlr.ns);
2198 
2199 		g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2200 		nvme_ctrlr_identify_active_ns(&ctrlr);
2201 
2202 		for (nsid = 1; nsid <= ctrlr.cdata.nn; nsid++) {
2203 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
2204 		}
2205 
2206 		for (; nsid <= 1559; nsid++) {
2207 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
2208 		}
2209 
2210 		g_active_ns_list_length = 0;
2211 		if (minor <= 1) {
2212 			ctrlr.cdata.nn = 0;
2213 		}
2214 		nvme_ctrlr_identify_active_ns(&ctrlr);
2215 		CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
2216 
2217 		g_active_ns_list_length = 1;
2218 		if (minor <= 1) {
2219 			ctrlr.cdata.nn = 1;
2220 		}
2221 		nvme_ctrlr_identify_active_ns(&ctrlr);
2222 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
2223 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
2224 		nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
2225 		CU_ASSERT(nsid == 1);
2226 
2227 		if (minor >= 2) {
2228 			/* For NVMe 1.2 and newer, the namespace list can have "holes" where
2229 			 * some namespaces are not active. Test this. */
2230 			g_active_ns_list_length = 2;
2231 			g_active_ns_list[1] = 3;
2232 			nvme_ctrlr_identify_active_ns(&ctrlr);
2233 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
2234 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
2235 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
2236 			nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
2237 			CU_ASSERT(nsid == 3);
2238 			nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
2239 			CU_ASSERT(nsid == 0);
2240 
2241 			/* Reset the active namespace list array */
2242 			g_active_ns_list[1] = 2;
2243 		}
2244 
2245 		g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2246 		if (minor <= 1) {
2247 			ctrlr.cdata.nn = 1531;
2248 		}
2249 		nvme_ctrlr_identify_active_ns(&ctrlr);
2250 
2251 		ns_id_count = 0;
2252 		for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
2253 		     nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
2254 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
2255 			ns_id_count++;
2256 		}
2257 		CU_ASSERT(ns_id_count == ctrlr.cdata.nn);
2258 
2259 		nvme_ctrlr_destruct(&ctrlr);
2260 	}
2261 
2262 	g_active_ns_list = NULL;
2263 	g_active_ns_list_length = 0;
2264 }
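
/*
 * Note on the numbers above: with page_size = 0x1000, one Identify Active
 * Namespace ID List page holds 0x1000 / sizeof(uint32_t) = 1024 NSIDs, so
 * the 1531-entry list forces the driver to issue a second Identify using
 * the last NSID of the first page as the new starting point.  The
 * minor <= 1 iterations instead exercise the fallback path for older
 * controllers, where the active set is derived from cdata.nn (which is
 * why the test only adjusts nn for those versions).
 */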
2265 
2266 static void
2267 test_nvme_ctrlr_test_active_ns_error_case(void)
2268 {
2269 	int rc;
2270 	struct spdk_nvme_ctrlr	ctrlr = {.state = NVME_CTRLR_STATE_READY};
2271 
2272 	ctrlr.page_size = 0x1000;
2273 	ctrlr.vs.bits.mjr = 1;
2274 	ctrlr.vs.bits.mnr = 2;
2275 	ctrlr.vs.bits.ter = 0;
2276 	ctrlr.cdata.nn = 2;
2277 
2278 	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
2279 	rc = nvme_ctrlr_identify_active_ns(&ctrlr);
2280 	CU_ASSERT(rc == -ENXIO);
2281 	set_status_code = SPDK_NVME_SC_SUCCESS;
2282 }
2283 
2284 static void
2285 test_nvme_ctrlr_init_delay(void)
2286 {
2287 	DECLARE_AND_CONSTRUCT_CTRLR();
2288 
2289 	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
2290 
2291 	/*
2292 	 * Initial state: CC.EN = 0, CSTS.RDY = 0
2293 	 * init() should set CC.EN = 1.
2294 	 */
2295 	g_ut_nvme_regs.cc.bits.en = 0;
2296 	g_ut_nvme_regs.csts.bits.rdy = 0;
2297 
2298 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2299 	/* Test that the initialization delay works correctly.  We only
2300 	 * do the initialization delay on SSDs that require it, so
2301 	 * set that quirk here.
2302 	 */
2303 	ctrlr.quirks = NVME_QUIRK_DELAY_BEFORE_INIT;
2304 	ctrlr.cdata.nn = 1;
2305 	ctrlr.page_size = 0x1000;
2306 	ctrlr.state = NVME_CTRLR_STATE_INIT_DELAY;
2307 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2308 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
2309 	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
2310 
2311 	/* Delay 1 s; init just returns since the sleep time hasn't fully elapsed */
2312 	spdk_delay_us(1 * spdk_get_ticks_hz());
2313 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2314 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
2315 	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
2316 
2317 	/* The sleep timeout has elapsed, so initialization starts */
2318 	spdk_delay_us(2 * spdk_get_ticks_hz());
2319 	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
2320 		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2321 	}
2322 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2323 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
2324 
2325 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2326 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
2327 
2328 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2329 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
2330 
2331 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2332 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
2333 	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
2334 
2335 	/*
2336 	 * Transition to CSTS.RDY = 1.
2337 	 */
2338 	g_ut_nvme_regs.csts.bits.rdy = 1;
2339 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2340 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
2341 
2342 	/*
2343 	 * Transition to READY.
2344 	 */
2345 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2346 		nvme_ctrlr_process_init(&ctrlr);
2347 	}
2348 
2349 	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
2350 	nvme_ctrlr_destruct(&ctrlr);
2351 }
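
/*
 * A sketch of the sleep gate exercised above (illustrative): while the
 * quirk-armed timeout is pending, nvme_ctrlr_process_init() returns
 * without advancing the state machine, and only clears the timeout once
 * enough mocked time has passed:
 *
 *	if (ctrlr->sleep_timeout_tsc != 0 &&
 *	    spdk_get_ticks() < ctrlr->sleep_timeout_tsc) {
 *		return 0;
 *	}
 *	ctrlr->sleep_timeout_tsc = 0;
 */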
2352 
2353 static void
2354 test_spdk_nvme_ctrlr_set_trid(void)
2355 {
2356 	struct spdk_nvme_ctrlr ctrlr = {{0}};
2357 	struct spdk_nvme_transport_id new_trid = {{0}};
2358 
2359 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
2360 
2361 	ctrlr.is_failed = false;
2362 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2363 	snprintf(ctrlr.trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
2364 	snprintf(ctrlr.trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
2365 	snprintf(ctrlr.trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
2366 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EPERM);
2367 
2368 	ctrlr.is_failed = true;
2369 	new_trid.trtype = SPDK_NVME_TRANSPORT_TCP;
2370 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
2371 	CU_ASSERT(ctrlr.trid.trtype == SPDK_NVME_TRANSPORT_RDMA);
2372 
2373 	new_trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2374 	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode2");
2375 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
2376 	CU_ASSERT(strncmp(ctrlr.trid.subnqn, "nqn.2016-06.io.spdk:cnode1", SPDK_NVMF_NQN_MAX_LEN) == 0);
2377 
2378 
2379 	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
2380 	snprintf(new_trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
2381 	snprintf(new_trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4421");
2382 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == 0);
2383 	CU_ASSERT(strncmp(ctrlr.trid.traddr, "192.168.100.9", SPDK_NVMF_TRADDR_MAX_LEN) == 0);
2384 	CU_ASSERT(strncmp(ctrlr.trid.trsvcid, "4421", SPDK_NVMF_TRSVCID_MAX_LEN) == 0);
2385 
2386 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
2387 }
2388 
2389 static void
2390 test_nvme_ctrlr_init_set_nvmf_ioccsz(void)
2391 {
2392 	struct spdk_nvme_ctrlr_data cdata = {};
2393 	DECLARE_AND_CONSTRUCT_CTRLR();
2394 	/* 260 * 16 = 4160 bytes, i.e. 4096 bytes of in-capsule data after the 64-byte SQE */
2395 	cdata.nvmf_specific.ioccsz = 260;
2396 	cdata.nvmf_specific.icdoff = 1;
2397 	g_cdata = &cdata;
2398 
2399 	/* Check PCIe trtype */
2400 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2401 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2402 
2403 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2404 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2405 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2406 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2407 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2408 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2409 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2410 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2411 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2412 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2413 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2414 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2415 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2416 
2417 	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
2418 	CU_ASSERT(ctrlr.icdoff == 0);
2419 
2420 	nvme_ctrlr_destruct(&ctrlr);
2421 
2422 	/* Check RDMA trtype */
2423 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2424 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2425 
2426 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2427 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2428 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2429 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2430 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2431 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2432 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2433 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2434 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2435 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2436 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2437 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2438 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2439 
2440 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2441 	CU_ASSERT(ctrlr.icdoff == 1);
2442 	ctrlr.ioccsz_bytes = 0;
2443 	ctrlr.icdoff = 0;
2444 
2445 	nvme_ctrlr_destruct(&ctrlr);
2446 
2447 	/* Check TCP trtype */
2448 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2449 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
2450 
2451 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2452 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2453 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2454 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2455 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2456 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2457 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2458 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2459 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2460 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2461 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2462 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2463 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2464 
2465 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2466 	CU_ASSERT(ctrlr.icdoff == 1);
2467 	ctrlr.ioccsz_bytes = 0;
2468 	ctrlr.icdoff = 0;
2469 
2470 	nvme_ctrlr_destruct(&ctrlr);
2471 
2472 	/* Check FC trtype */
2473 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2474 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_FC;
2475 
2476 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2477 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2478 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2479 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2480 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2481 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2482 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2483 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2484 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2485 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2486 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2487 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2488 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2489 
2490 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2491 	CU_ASSERT(ctrlr.icdoff == 1);
2492 	ctrlr.ioccsz_bytes = 0;
2493 	ctrlr.icdoff = 0;
2494 
2495 	nvme_ctrlr_destruct(&ctrlr);
2496 
2497 	/* Check CUSTOM trtype */
2498 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2499 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;
2500 
2501 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2502 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2503 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2504 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2505 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2506 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2507 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2508 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2509 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2510 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2511 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2512 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2513 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2514 
2515 	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
2516 	CU_ASSERT(ctrlr.icdoff == 0);
2517 
2518 	nvme_ctrlr_destruct(&ctrlr);
2519 
2520 	/* Check CUSTOM_FABRICS trtype */
2521 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2522 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM_FABRICS;
2523 
2524 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2525 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2526 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2527 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2528 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2529 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2530 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2531 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2532 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2533 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2534 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2535 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2536 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2537 
2538 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2539 	CU_ASSERT(ctrlr.icdoff == 1);
2540 	ctrlr.ioccsz_bytes = 0;
2541 	ctrlr.icdoff = 0;
2542 
2543 	nvme_ctrlr_destruct(&ctrlr);
2544 
2545 	g_cdata = NULL;
2546 }
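
/*
 * Worked example for the ioccsz assertions above, assuming the usual
 * conversion in which ioccsz is expressed in 16-byte units and includes
 * the 64-byte submission queue entry:
 *
 *	ioccsz_bytes = ioccsz * 16 - sizeof(struct spdk_nvme_cmd)
 *	             = 260 * 16 - 64 = 4096
 *
 * Hence the fabrics transports (RDMA, TCP, FC, CUSTOM_FABRICS) report 4096
 * bytes of in-capsule data, while PCIe and CUSTOM, where the field does
 * not apply, leave ioccsz_bytes and icdoff at 0.
 */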
2547 
2548 static void
2549 test_nvme_ctrlr_init_set_num_queues(void)
2550 {
2551 	DECLARE_AND_CONSTRUCT_CTRLR();
2552 
2553 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2554 
2555 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2556 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2557 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2558 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2559 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2560 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2561 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2562 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2563 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2564 
2565 	ctrlr.opts.num_io_queues = 64;
2566 	/* Number of queues is zero-based, so use 31 to get 32 queues */
2567 	fake_cpl.cdw0 = 31 + (31 << 16);
2568 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_ACTIVE_NS */
2569 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2570 	CU_ASSERT(ctrlr.opts.num_io_queues == 32);
2571 	fake_cpl.cdw0 = 0;
2572 
2573 	nvme_ctrlr_destruct(&ctrlr);
2574 }
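
/*
 * The fake completion above follows the Set Features (Number of Queues)
 * encoding: cdw0 bits 15:0 carry NSQA and bits 31:16 carry NCQA, both
 * zero-based.  A worked example for the assertion:
 *
 *	nsqa = cdw0 & 0xffff;          // 31
 *	ncqa = (cdw0 >> 16) & 0xffff;  // 31
 *	granted = min(nsqa, ncqa) + 1; // 32
 *
 * so the requested 64 I/O queues are clamped down to 32.
 */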
2575 
2576 static void
2577 test_nvme_ctrlr_init_set_keep_alive_timeout(void)
2578 {
2579 	DECLARE_AND_CONSTRUCT_CTRLR();
2580 
2581 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2582 
2583 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2584 	ctrlr.cdata.kas = 1;
2585 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2586 	fake_cpl.cdw0 = 120000;
2587 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_IOCS_SPECIFIC */
2588 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2589 	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 120000);
2590 	fake_cpl.cdw0 = 0;
2591 
2592 	/* Target does not support Get Features "Keep Alive Timer" */
2593 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2594 	ctrlr.cdata.kas = 1;
2595 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2596 	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
2597 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_IOCS_SPECIFIC */
2598 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2599 	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 60000);
2600 	set_status_code = SPDK_NVME_SC_SUCCESS;
2601 
2602 	/* Target fails Get Features "Keep Alive Timer" for another reason */
2603 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2604 	ctrlr.cdata.kas = 1;
2605 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2606 	set_status_code = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2607 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> ERROR */
2608 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
2609 	set_status_code = SPDK_NVME_SC_SUCCESS;
2610 
2611 	nvme_ctrlr_destruct(&ctrlr);
2612 }
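
/*
 * The three cases above pin down the keep-alive init step: the driver
 * issues Get Features "Keep Alive Timer" and, on success, adopts the
 * controller-reported value from cdw0 (in ms, 120000 here) in place of
 * the requested 60000.  SPDK_NVME_SC_INVALID_FIELD means the feature is
 * simply not supported, so the requested value is kept; any other failure
 * is fatal and parks the controller in the ERROR state.
 */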
2613 
2614 static void
2615 test_alloc_io_qpair_fail(void)
2616 {
2617 	struct spdk_nvme_ctrlr ctrlr = {};
2618 	struct spdk_nvme_qpair *q0;
2619 
2620 	setup_qpairs(&ctrlr, 1);
2621 
2622 	/* Modify the connect_qpair return code to inject a failure */
2623 	g_connect_qpair_return_code = 1;
2624 
2625 	/* Attempt to allocate a qpair; this should fail */
2626 	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
2627 	SPDK_CU_ASSERT_FATAL(q0 == NULL);
2628 
2629 	/* Verify that the qpair is removed from the lists */
2630 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.active_io_qpairs));
2631 
2632 	g_connect_qpair_return_code = 0;
2633 	cleanup_qpairs(&ctrlr);
2634 }
2635 
2636 static void
2637 test_nvme_ctrlr_add_remove_process(void)
2638 {
2639 	struct spdk_nvme_ctrlr ctrlr = {};
2640 	void *devhandle = (void *)0xDEADBEEF;
2641 	struct spdk_nvme_ctrlr_process *proc = NULL;
2642 	int rc;
2643 
2644 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2645 	TAILQ_INIT(&ctrlr.active_procs);
2646 
2647 	rc = nvme_ctrlr_add_process(&ctrlr, devhandle);
2648 	CU_ASSERT(rc == 0);
2649 	proc = TAILQ_FIRST(&ctrlr.active_procs);
2650 	SPDK_CU_ASSERT_FATAL(proc != NULL);
2651 	CU_ASSERT(proc->is_primary == true);
2652 	CU_ASSERT(proc->pid == getpid());
2653 	CU_ASSERT(proc->devhandle == (void *)0xDEADBEEF);
2654 	CU_ASSERT(proc->ref == 0);
2655 
2656 	nvme_ctrlr_remove_process(&ctrlr, proc);
2657 	CU_ASSERT(TAILQ_EMPTY(&ctrlr.active_procs));
2658 }
2659 
2660 static void
2661 test_nvme_ctrlr_set_arbitration_feature(void)
2662 {
2663 	struct spdk_nvme_ctrlr ctrlr = {};
2664 
2665 	ctrlr.opts.arbitration_burst = 6;
2666 	ctrlr.flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
2667 	ctrlr.opts.low_priority_weight = 1;
2668 	ctrlr.opts.medium_priority_weight = 2;
2669 	ctrlr.opts.high_priority_weight = 3;
2670 	/* g_ut_cdw11 records the cdw11 value of the Set Features command. */
2671 	g_ut_cdw11 = 0;
2672 
2673 	/* arbitration_burst fits in the 3-bit Arbitration Burst field. */
2674 	nvme_ctrlr_set_arbitration_feature(&ctrlr);
2675 	CU_ASSERT((uint8_t)g_ut_cdw11 == 6);
2676 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 8) == 1);
2677 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 16) == 2);
2678 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 24) == 3);
2679 
2680 	/* arbitration_burst does not fit in the 3-bit field, so the feature is not set. */
2681 	g_ut_cdw11 = 0;
2682 	ctrlr.opts.arbitration_burst = 8;
2683 
2684 	nvme_ctrlr_set_arbitration_feature(&ctrlr);
2685 	CU_ASSERT(g_ut_cdw11 == 0);
2686 }
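
/*
 * cdw11 layout for the Arbitration feature, as checked bytewise above:
 * Arbitration Burst in bits 2:0, LPW in bits 15:8, MPW in bits 23:16 and
 * HPW in bits 31:24.  A sketch of the packing (illustrative):
 *
 *	cdw11 = (burst & 0x7) | (lpw << 8) | (mpw << 16) | (hpw << 24);
 *
 * A burst value of 8 does not fit in the 3-bit field, which is why the
 * second case leaves g_ut_cdw11 untouched.
 */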
2687 
2688 static void
2689 test_nvme_ctrlr_set_state(void)
2690 {
2691 	struct spdk_nvme_ctrlr ctrlr = {};
2692 	MOCK_SET(spdk_get_ticks, 0);
2693 
2694 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 1000);
2695 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2696 	CU_ASSERT(ctrlr.state_timeout_tsc == 1000000);
2697 
2698 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 0);
2699 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2700 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
2701 
2702 	/* Timeout ticks cause integer overflow. */
2703 	MOCK_SET(spdk_get_ticks, UINT64_MAX);
2704 
2705 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 1000);
2706 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2707 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
2708 	MOCK_CLEAR(spdk_get_ticks);
2709 }
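
/*
 * The arithmetic behind the assertions above: with the mocked tick counter
 * at 0 and a tick rate of 1000 ticks per millisecond in this test env,
 * state_timeout_tsc = now + timeout_in_ms * ticks_per_ms = 0 + 1000 * 1000
 * = 1000000.  A timeout of 0 means wait forever, and a sum that would
 * overflow (now = UINT64_MAX) is clamped to NVME_TIMEOUT_INFINITE instead
 * of wrapping.
 */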
2710 
2711 static void
2712 test_nvme_ctrlr_active_ns_list_v0(void)
2713 {
2714 	DECLARE_AND_CONSTRUCT_CTRLR();
2715 
2716 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2717 
2718 	ctrlr.vs.bits.mjr = 1;
2719 	ctrlr.vs.bits.mnr = 0;
2720 	ctrlr.vs.bits.ter = 0;
2721 	ctrlr.cdata.nn = 1024;
2722 
2723 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2724 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2725 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2726 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2727 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2728 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2729 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2730 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 1024);
2731 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2732 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2733 
2734 	nvme_ctrlr_destruct(&ctrlr);
2735 }
2736 
2737 static void
2738 test_nvme_ctrlr_active_ns_list_v2(void)
2739 {
2740 	uint32_t i;
2741 	uint32_t active_ns_list[1024];
2742 	DECLARE_AND_CONSTRUCT_CTRLR();
2743 
2744 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2745 
2746 	ctrlr.vs.bits.mjr = 1;
2747 	ctrlr.vs.bits.mnr = 2;
2748 	ctrlr.vs.bits.ter = 0;
2749 	ctrlr.cdata.nn = 4096;
2750 
2751 	g_active_ns_list = active_ns_list;
2752 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2753 
2754 	/* No active namespaces */
2755 	memset(active_ns_list, 0, sizeof(active_ns_list));
2756 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2757 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2758 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2759 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2760 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2761 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2762 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
2763 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2764 
2765 	nvme_ctrlr_destruct(&ctrlr);
2766 
2767 	/* 1024 active namespaces - one full page */
2768 	memset(active_ns_list, 0, sizeof(active_ns_list));
2769 	for (i = 0; i < 1024; ++i) {
2770 		active_ns_list[i] = i + 1;
2771 	}
2772 
2773 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2774 
2775 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2776 	g_active_ns_list = active_ns_list;
2777 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2778 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2779 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2780 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2781 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2782 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2783 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2784 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 1024);
2785 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2786 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2787 
2788 	nvme_ctrlr_destruct(&ctrlr);
2789 
2790 	/* 1023 active namespaces - full page minus one */
2791 	memset(active_ns_list, 0, sizeof(active_ns_list));
2792 	for (i = 0; i < 1023; ++i) {
2793 		active_ns_list[i] = i + 1;
2794 	}
2795 
2796 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2797 
2798 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2799 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2800 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2801 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2802 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1023));
2803 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2804 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2805 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2806 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 0);
2807 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2808 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2809 
2810 	nvme_ctrlr_destruct(&ctrlr);
2811 
2812 	g_active_ns_list = NULL;
2813 	g_active_ns_list_length = 0;
2814 }
2815 
2816 static void
2817 test_nvme_ctrlr_ns_mgmt(void)
2818 {
2819 	DECLARE_AND_CONSTRUCT_CTRLR();
2820 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2821 	uint32_t active_ns_list2[] = { 1, 2, 3, 100, 1024 };
2822 	struct spdk_nvme_ns_data nsdata = {};
2823 	struct spdk_nvme_ctrlr_list ctrlr_list = {};
2824 	uint32_t nsid;
2825 
2826 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2827 
2828 	ctrlr.vs.bits.mjr = 1;
2829 	ctrlr.vs.bits.mnr = 2;
2830 	ctrlr.vs.bits.ter = 0;
2831 	ctrlr.cdata.nn = 4096;
2832 
2833 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2834 	g_active_ns_list = active_ns_list;
2835 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2836 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2837 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2838 	}
2839 
2840 	fake_cpl.cdw0 = 3;
2841 	nsid = spdk_nvme_ctrlr_create_ns(&ctrlr, &nsdata);
2842 	fake_cpl.cdw0 = 0;
2843 	CU_ASSERT(nsid == 3);
2844 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2845 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2846 
2847 	g_active_ns_list = active_ns_list2;
2848 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
2849 	CU_ASSERT(spdk_nvme_ctrlr_attach_ns(&ctrlr, 3, &ctrlr_list) == 0);
2850 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2851 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2852 
2853 	g_active_ns_list = active_ns_list;
2854 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2855 	CU_ASSERT(spdk_nvme_ctrlr_detach_ns(&ctrlr, 3, &ctrlr_list) == 0);
2856 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2857 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2858 
2859 	CU_ASSERT(spdk_nvme_ctrlr_delete_ns(&ctrlr, 3) == 0);
2860 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2861 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2862 	g_active_ns_list = NULL;
2863 	g_active_ns_list_length = 0;
2864 
2865 	nvme_ctrlr_destruct(&ctrlr);
2866 }
2867 
2868 static void
2869 check_en_set_rdy(void)
2870 {
2871 	if (g_ut_nvme_regs.cc.bits.en == 1) {
2872 		g_ut_nvme_regs.csts.bits.rdy = 1;
2873 	}
2874 }
2875 
2876 static void
2877 test_nvme_ctrlr_reset(void)
2878 {
2879 	DECLARE_AND_CONSTRUCT_CTRLR();
2880 	struct spdk_nvme_ctrlr_data cdata = { .nn = 4096 };
2881 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2882 	uint32_t active_ns_list2[] = { 1, 100, 1024 };
2883 
2884 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2885 
2886 	g_ut_nvme_regs.vs.bits.mjr = 1;
2887 	g_ut_nvme_regs.vs.bits.mnr = 2;
2888 	g_ut_nvme_regs.vs.bits.ter = 0;
2889 	nvme_ctrlr_get_vs(&ctrlr, &ctrlr.vs);
2890 	ctrlr.cdata.nn = 2048;
2891 
2892 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2893 	g_active_ns_list = active_ns_list;
2894 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2895 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2896 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2897 	}
2898 	CU_ASSERT(spdk_nvme_ctrlr_get_num_ns(&ctrlr) == 2048);
2899 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 2) != NULL);
2900 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2));
2901 
2902 	/* Reset controller with changed number of namespaces */
2903 	g_cdata = &cdata;
2904 	g_active_ns_list = active_ns_list2;
2905 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
2906 	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);
2907 	g_ut_nvme_regs.cc.raw = 0;
2908 	g_ut_nvme_regs.csts.raw = 0;
2909 	g_set_reg_cb = check_en_set_rdy;
2910 	g_wait_for_completion_return_val = -ENXIO;
2911 	CU_ASSERT(spdk_nvme_ctrlr_reset(&ctrlr) == 0);
2912 	g_set_reg_cb = NULL;
2913 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);
2914 	g_cdata = NULL;
2915 	g_active_ns_list = NULL;
2916 	g_active_ns_list_length = 0;
2917 
2918 	CU_ASSERT(spdk_nvme_ctrlr_get_num_ns(&ctrlr) == 4096);
2919 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 2) != NULL);
2920 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2));
2921 
2922 	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
2923 	nvme_ctrlr_destruct(&ctrlr);
2924 
2925 	g_wait_for_completion_return_val = 0;
2926 }
2927 
2928 static uint32_t g_aer_cb_counter;
2929 
2930 static void
2931 aer_cb(void *aer_cb_arg, const struct spdk_nvme_cpl *cpl)
2932 {
2933 	g_aer_cb_counter++;
2934 }
2935 
2936 static void
2937 test_nvme_ctrlr_aer_callback(void)
2938 {
2939 	DECLARE_AND_CONSTRUCT_CTRLR();
2940 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2941 	union spdk_nvme_async_event_completion	aer_event = {
2942 		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
2943 		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
2944 	};
2945 	struct spdk_nvme_cpl aer_cpl = {
2946 		.status.sct = SPDK_NVME_SCT_GENERIC,
2947 		.status.sc = SPDK_NVME_SC_SUCCESS,
2948 		.cdw0 = aer_event.raw
2949 	};
2950 
2951 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2952 
2953 	ctrlr.vs.bits.mjr = 1;
2954 	ctrlr.vs.bits.mnr = 2;
2955 	ctrlr.vs.bits.ter = 0;
2956 	ctrlr.cdata.nn = 4096;
2957 
2958 	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
2959 	g_active_ns_list = active_ns_list;
2960 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2961 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2962 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2963 	}
2964 
2965 	CU_ASSERT(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);
2966 	spdk_nvme_ctrlr_register_aer_callback(&ctrlr, aer_cb, NULL);
2967 
2968 	/* Async event */
2969 	g_aer_cb_counter = 0;
2970 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
2971 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
2972 	CU_ASSERT(g_aer_cb_counter == 1);
2973 	g_active_ns_list = NULL;
2974 	g_active_ns_list_length = 0;
2975 
2976 	nvme_ctrlr_free_processes(&ctrlr);
2977 	nvme_ctrlr_destruct(&ctrlr);
2978 }
2979 
2980 static void
2981 test_nvme_ctrlr_ns_attr_changed(void)
2982 {
2983 	DECLARE_AND_CONSTRUCT_CTRLR();
2984 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2985 	uint32_t active_ns_list2[] = { 1, 2, 1024 };
2986 	uint32_t active_ns_list3[] = { 1, 2, 101, 1024 };
2987 	union spdk_nvme_async_event_completion	aer_event = {
2988 		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
2989 		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
2990 	};
2991 	struct spdk_nvme_cpl aer_cpl = {
2992 		.status.sct = SPDK_NVME_SCT_GENERIC,
2993 		.status.sc = SPDK_NVME_SC_SUCCESS,
2994 		.cdw0 = aer_event.raw
2995 	};
2996 
2997 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2998 
2999 	ctrlr.vs.bits.mjr = 1;
3000 	ctrlr.vs.bits.mnr = 3;
3001 	ctrlr.vs.bits.ter = 0;
3002 	ctrlr.cap.bits.css |= SPDK_NVME_CAP_CSS_IOCS;
3003 	ctrlr.cdata.nn = 4096;
3004 
3005 	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
3006 	g_active_ns_list = active_ns_list;
3007 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
3008 
3009 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
3010 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
3011 	}
3012 
3013 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 100));
3014 
3015 	CU_ASSERT(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);
3016 	spdk_nvme_ctrlr_register_aer_callback(&ctrlr, aer_cb, NULL);
3017 
3018 	/* Remove NS 100 */
3019 	g_aer_cb_counter = 0;
3020 	g_active_ns_list = active_ns_list2;
3021 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
3022 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
3023 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
3024 	CU_ASSERT(g_aer_cb_counter == 1);
3025 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 100));
3026 
3027 	/* Add NS 101 */
3028 	g_active_ns_list = active_ns_list3;
3029 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list3);
3030 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
3031 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
3032 	CU_ASSERT(g_aer_cb_counter == 2);
3033 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 101));
3034 
3035 	g_active_ns_list = NULL;
3036 	g_active_ns_list_length = 0;
3037 	nvme_ctrlr_free_processes(&ctrlr);
3038 	nvme_ctrlr_destruct(&ctrlr);
3039 }
3040 
3041 static void
3042 test_nvme_ctrlr_identify_namespaces_iocs_specific_next(void)
3043 {
3044 	struct spdk_nvme_ctrlr ctrlr = {};
3045 	uint32_t prev_nsid;
3046 	struct spdk_nvme_ns ns[5] = {};
3047 	struct spdk_nvme_ctrlr ns_ctrlr[5] = {};
3048 	int rc = 0;
3049 	int i;
3050 
3051 	RB_INIT(&ctrlr.ns);
3052 	for (i = 0; i < 5; i++) {
3053 		ns[i].id = i + 1;
3054 		ns[i].active = true;
3055 	}
3056 
3057 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
3058 
3059 	ctrlr.cdata.nn = 5;
3060 	/* case 1: No first/next active NS; move on to the next state. Expect: pass */
3061 	prev_nsid = 0;
3062 	ctrlr.active_ns_count = 0;
3063 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3064 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3065 	CU_ASSERT(rc == 0);
3066 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES);
3067 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3068 
3069 	/* case 2: Move on to the next active NS; no namespace with (supported) IOCS-specific data is found. Expect: pass */
3070 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3071 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3072 	prev_nsid = 1;
3073 	for (i = 0; i < 5; i++) {
3074 		RB_INSERT(nvme_ns_tree, &ctrlr.ns, &ns[i]);
3075 	}
3076 	ctrlr.active_ns_count = 5;
3077 	ns[1].csi = SPDK_NVME_CSI_NVM;
3078 	ns[1].id = 2;
3079 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3080 	CU_ASSERT(rc == 0);
3081 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES);
3082 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3083 
3084 	/* case 3: ns.csi is SPDK_NVME_CSI_ZNS; identify that namespace instead of looping further. Expect: pass */
3085 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3086 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3087 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3088 	prev_nsid = 0;
3089 	ctrlr.active_ns_count = 5;
3090 
3091 	for (int i = 0; i < 5; i++) {
3092 		ns[i].csi = SPDK_NVME_CSI_NVM;
3093 		ns[i].id = i + 1;
3094 		ns[i].ctrlr = &ns_ctrlr[i];
3095 	}
3096 	ns[4].csi = SPDK_NVME_CSI_ZNS;
3097 	ns_ctrlr[4].opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3098 
3099 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3100 	CU_ASSERT(rc == 0);
3101 	CU_ASSERT(ctrlr.state == 0);
3102 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3103 	CU_ASSERT(ns_ctrlr[4].state == NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC);
3104 	CU_ASSERT(ns_ctrlr[4].state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3105 
3106 	for (int i = 0; i < 5; i++) {
3107 		nvme_ns_free_zns_specific_data(&ns[i]);
3108 	}
3109 
3110 	/* case 4: nvme_ctrlr_identify_ns_iocs_specific_async returns 1. Expect: failure */
3111 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3112 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3113 	prev_nsid = 1;
3114 	ctrlr.active_ns_count = 5;
3115 	ns[1].csi = SPDK_NVME_CSI_ZNS;
3116 	g_fail_next_identify = true;
3117 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3118 	CU_ASSERT(rc == 1);
3119 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
3120 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3121 
3122 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
3123 }
3124 
3125 static void
3126 test_nvme_ctrlr_set_supported_log_pages(void)
3127 {
3128 	int rc;
3129 	struct spdk_nvme_ctrlr ctrlr = {};
3130 
3131 	/* ANA reporting supported */
3132 	memset(&ctrlr, 0, sizeof(ctrlr));
3133 	ctrlr.cdata.cmic.ana_reporting = true;
3134 	ctrlr.cdata.lpa.celp = 1;
3135 	ctrlr.cdata.nanagrpid = 1;
3136 	ctrlr.active_ns_count = 1;
3137 
3138 	rc = nvme_ctrlr_set_supported_log_pages(&ctrlr);
3139 	CU_ASSERT(rc == 0);
3140 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ERROR] == true);
3141 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] == true);
3142 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] == true);
3143 	CU_ASSERT(ctrlr.ana_log_page_size == sizeof(struct spdk_nvme_ana_page) +
3144 		  sizeof(struct spdk_nvme_ana_group_descriptor) * 1 + sizeof(uint32_t) * 1);
3145 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS] == true);
3146 	free(ctrlr.ana_log_page);
3147 	free(ctrlr.copied_ana_desc);
3148 }
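
/*
 * The ana_log_page_size assertion above follows directly from the log page
 * layout: one fixed header, one group descriptor per ANA group (nanagrpid)
 * and one uint32_t NSID slot per active namespace:
 *
 *	ana_log_page_size = sizeof(struct spdk_nvme_ana_page) +
 *			    nanagrpid * sizeof(struct spdk_nvme_ana_group_descriptor) +
 *			    active_ns_count * sizeof(uint32_t);
 */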
3149 
3150 static void
3151 test_nvme_ctrlr_set_intel_supported_log_pages(void)
3152 {
3153 	DECLARE_AND_CONSTRUCT_CTRLR();
3154 
3155 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
3156 
3157 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3158 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
3159 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
3160 	ctrlr.state = NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES;
3161 
3162 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
3163 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES);
3164 
3165 	set_status_code = SPDK_NVME_SC_SUCCESS;
3166 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
3167 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES);
3168 
3169 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ERROR] == true);
3170 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] == true);
3171 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] == true);
3172 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] == true);
3173 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] == true);
3174 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] == true);
3175 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_SMART] == true);
3176 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] == true);
3177 
3178 	nvme_ctrlr_destruct(&ctrlr);
3179 }
3180 
3181 #define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
3182 				 sizeof(uint32_t))
3183 static void
3184 test_nvme_ctrlr_parse_ana_log_page(void)
3185 {
3186 	int rc, i;
3187 	struct spdk_nvme_ctrlr ctrlr = {};
3188 	struct spdk_nvme_ns ns[3] = {};
3189 	struct spdk_nvme_ana_page ana_hdr;
3190 	char _ana_desc[UT_ANA_DESC_SIZE];
3191 	struct spdk_nvme_ana_group_descriptor *ana_desc;
3192 	uint32_t offset;
3193 
3194 	RB_INIT(&ctrlr.ns);
3195 	for (i = 0; i < 3; i++) {
3196 		ns[i].id = i + 1;
3197 		ns[i].active = true;
3198 		RB_INSERT(nvme_ns_tree, &ctrlr.ns, &ns[i]);
3199 	}
3200 
3201 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
3202 
3203 	ctrlr.cdata.nn = 3;
3204 	ctrlr.cdata.nanagrpid = 3;
3205 	ctrlr.active_ns_count = 3;
3206 
3207 	rc = nvme_ctrlr_update_ana_log_page(&ctrlr);
3208 	CU_ASSERT(rc == 0);
3209 	CU_ASSERT(ctrlr.ana_log_page != NULL);
3210 	CU_ASSERT(ctrlr.copied_ana_desc != NULL);
3211 
3212 	/*
3213 	 * Create ANA log page data - There are three ANA groups.
3214 	 * Each ANA group has a namespace and has a different ANA state.
3215 	 */
3216 	memset(&ana_hdr, 0, sizeof(ana_hdr));
3217 	ana_hdr.num_ana_group_desc = 3;
3218 
3219 	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= ctrlr.ana_log_page_size);
3220 	memcpy((char *)ctrlr.ana_log_page, (char *)&ana_hdr, sizeof(ana_hdr));
3221 	offset = sizeof(ana_hdr);
3222 
3223 	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;
3224 	memset(ana_desc, 0, UT_ANA_DESC_SIZE);
3225 	ana_desc->num_of_nsid = 1;
3226 
	ana_desc->ana_group_id = 1;
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->nsid[0] = 3;

	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);
	offset += UT_ANA_DESC_SIZE;

	ana_desc->ana_group_id = 2;
	ana_desc->ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ana_desc->nsid[0] = 2;

	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);
	offset += UT_ANA_DESC_SIZE;

	ana_desc->ana_group_id = 3;
	ana_desc->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ana_desc->nsid[0] = 1;

	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);

	/* Parse the created ANA log page data, and update ANA states. */
	rc = nvme_ctrlr_parse_ana_log_page(&ctrlr, nvme_ctrlr_update_ns_ana_states,
					   &ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ns[0].ana_group_id == 3);
	CU_ASSERT(ns[0].ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(ns[1].ana_group_id == 2);
	CU_ASSERT(ns[1].ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(ns[2].ana_group_id == 1);
	CU_ASSERT(ns[2].ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);

	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);

	free(ctrlr.ana_log_page);
	free(ctrlr.copied_ana_desc);
}

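/*
 * Exercise ANA tracking across a namespace resize: bring the controller up
 * with two active namespaces in one ANA group, then grow the active list to
 * four via a namespace-attribute-changed AER followed by an ANA-change AER,
 * and verify every namespace picks up the new ANA state.
 */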
static void
test_nvme_ctrlr_ana_resize(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();
	uint32_t active_ns_list[] = { 1, 2, 3, 4 };
	struct spdk_nvme_ana_page ana_hdr = {
		.change_count = 0,
		.num_ana_group_desc = 1
	};
	uint8_t ana_desc_buf[sizeof(struct spdk_nvme_ana_group_descriptor) + 4 * sizeof(uint32_t)] = {};
	struct spdk_nvme_ana_group_descriptor *ana_desc =
		(struct spdk_nvme_ana_group_descriptor *)ana_desc_buf;
	struct spdk_nvme_ns *ns;
	union spdk_nvme_async_event_completion aer_event = {
		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
	};
	struct spdk_nvme_cpl aer_cpl = {
		.status.sct = SPDK_NVME_SCT_GENERIC,
		.status.sc = SPDK_NVME_SC_SUCCESS,
		.cdw0 = aer_event.raw
	};
	uint32_t i;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);

	ctrlr.vs.bits.mjr = 1;
	ctrlr.vs.bits.mnr = 4;
	ctrlr.vs.bits.ter = 0;
	ctrlr.cdata.nn = 4096;
	ctrlr.cdata.cmic.ana_reporting = true;
	ctrlr.cdata.nanagrpid = 1;

	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
	/* Start with 2 active namespaces */
	g_active_ns_list = active_ns_list;
	g_active_ns_list_length = 2;
	g_ana_hdr = &ana_hdr;
	g_ana_descs = &ana_desc;
	ana_desc->ana_group_id = 1;
	ana_desc->ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ana_desc->num_of_nsid = 2;
	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ana_desc->nsid[i] = i + 1;
	}

	/* Bring controller to ready state */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	}

	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ns = spdk_nvme_ctrlr_get_ns(&ctrlr, i + 1);
		CU_ASSERT(ns->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	}

	/* Add more namespaces */
	g_active_ns_list_length = 4;
	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
	nvme_ctrlr_complete_queued_async_events(&ctrlr);

	/* Update ANA log with new namespaces */
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->num_of_nsid = 4;
	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ana_desc->nsid[i] = i + 1;
	}
	aer_event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
	aer_cpl.cdw0 = aer_event.raw;
	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
	nvme_ctrlr_complete_queued_async_events(&ctrlr);

	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ns = spdk_nvme_ctrlr_get_ns(&ctrlr, i + 1);
		CU_ASSERT(ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	}

	g_active_ns_list = NULL;
	g_active_ns_list_length = 0;
	g_ana_hdr = NULL;
	g_ana_descs = NULL;
	nvme_ctrlr_free_processes(&ctrlr);
	nvme_ctrlr_destruct(&ctrlr);
}

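/*
 * spdk_nvme_ctrlr_get_memory_domains() just forwards to the transport
 * callback; verify the mocked return value is passed through unchanged.
 */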
static void
test_nvme_ctrlr_get_memory_domains(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	MOCK_SET(nvme_transport_ctrlr_get_memory_domains, 1);
	CU_ASSERT(spdk_nvme_ctrlr_get_memory_domains(&ctrlr, NULL, 0) == 1);

	MOCK_SET(nvme_transport_ctrlr_get_memory_domains, 0);
	CU_ASSERT(spdk_nvme_ctrlr_get_memory_domains(&ctrlr, NULL, 0) == 0);

	MOCK_CLEAR(nvme_transport_ctrlr_get_memory_domains);
}

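/*
 * nvme_transport_ctrlr_ready() is the transport's final hook during init;
 * its return value decides whether the controller reaches READY or ERROR.
 */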
static void
test_nvme_transport_ctrlr_ready(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	/* Transport init succeeded */
	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	/* Transport init failed */
	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	MOCK_SET(nvme_transport_ctrlr_ready, -1);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == -1);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
	MOCK_CLEAR(nvme_transport_ctrlr_ready);
}

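/*
 * Controller shutdown handshake: nvme_ctrlr_disable() clears CC.EN, and
 * nvme_ctrlr_disable_poll() returns -EAGAIN until the (fake) controller
 * reports CSTS.RDY = 0, at which point the controller is DISABLED.
 */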
static void
test_nvme_ctrlr_disable(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();
	int rc;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	/* Start a Controller Level Reset. */
	ctrlr.is_disconnecting = true;
	nvme_ctrlr_disable(&ctrlr);

	g_ut_nvme_regs.cc.bits.en = 0;

	rc = nvme_ctrlr_disable_poll(&ctrlr);
	CU_ASSERT(rc == -EAGAIN);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	g_ut_nvme_regs.csts.bits.rdy = 0;

	rc = nvme_ctrlr_disable_poll(&ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

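	/*
	 * Report shutdown complete in the fake registers so the shutdown
	 * notification issued by nvme_ctrlr_destruct() finishes immediately.
	 */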
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

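/*
 * spdk_nvme_ctrlr_get_numa_id() returns the stored NUMA node only when
 * numa.id_valid is set; otherwise it reports SPDK_ENV_NUMA_ID_ANY.
 */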
static void
test_nvme_numa_id(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	ctrlr.numa.id = 3;
	ctrlr.numa.id_valid = 0;
	CU_ASSERT(spdk_nvme_ctrlr_get_numa_id(&ctrlr) == SPDK_ENV_NUMA_ID_ANY);

	ctrlr.numa.id_valid = 1;
	CU_ASSERT(spdk_nvme_ctrlr_get_numa_id(&ctrlr) == 3);

	ctrlr.numa.id = SPDK_ENV_NUMA_ID_ANY;
	CU_ASSERT(spdk_nvme_ctrlr_get_numa_id(&ctrlr) == SPDK_ENV_NUMA_ID_ANY);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_ctrlr", NULL, NULL);

	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_1);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_1);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_rr);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_vs);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_delay);
	CU_ADD_TEST(suite, test_alloc_io_qpair_rr_1);
	CU_ADD_TEST(suite, test_ctrlr_get_default_ctrlr_opts);
	CU_ADD_TEST(suite, test_ctrlr_get_default_io_qpair_opts);
	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_1);
	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_2);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_update_firmware);
	CU_ADD_TEST(suite, test_nvme_ctrlr_fail);
	CU_ADD_TEST(suite, test_nvme_ctrlr_construct_intel_support_log_page_list);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_features);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_host_feature);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_doorbell_buffer_config);
#if 0 /* TODO: move to PCIe-specific unit test */
	CU_ADD_TEST(suite, test_nvme_ctrlr_alloc_cmb);
#endif
	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns);
	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns_error_case);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_reconnect_io_qpair);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_set_trid);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_nvmf_ioccsz);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_num_queues);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_keep_alive_timeout);
	CU_ADD_TEST(suite, test_alloc_io_qpair_fail);
	CU_ADD_TEST(suite, test_nvme_ctrlr_add_remove_process);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_arbitration_feature);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_state);
	CU_ADD_TEST(suite, test_nvme_ctrlr_active_ns_list_v0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_active_ns_list_v2);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ns_mgmt);
	CU_ADD_TEST(suite, test_nvme_ctrlr_reset);
	CU_ADD_TEST(suite, test_nvme_ctrlr_aer_callback);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ns_attr_changed);
	CU_ADD_TEST(suite, test_nvme_ctrlr_identify_namespaces_iocs_specific_next);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_log_pages);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_intel_supported_log_pages);
	CU_ADD_TEST(suite, test_nvme_ctrlr_parse_ana_log_page);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ana_resize);
	CU_ADD_TEST(suite, test_nvme_ctrlr_get_memory_domains);
	CU_ADD_TEST(suite, test_nvme_transport_ctrlr_ready);
	CU_ADD_TEST(suite, test_nvme_ctrlr_disable);
	CU_ADD_TEST(suite, test_nvme_numa_id);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}