/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation. All rights reserved.
 *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "spdk/log.h"

#include "common/lib/test_env.c"

#include "nvme/nvme_ctrlr.c"
#include "nvme/nvme_quirks.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

pid_t g_spdk_nvme_pid;

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;

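/*
 * Fake register file backing the register access stubs below. When set,
 * g_set_reg_cb is invoked after every register write so that tests can
 * react to specific writes.
 */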
struct spdk_nvme_registers g_ut_nvme_regs = {};
typedef void (*set_reg_cb)(void);
set_reg_cb g_set_reg_cb;

__thread int    nvme_thread_ioq_index = -1;

uint32_t set_size = 1;

int set_status_cpl = -1;

#define UT_HOSTID "e53e9258-c93b-48b5-be1a-f025af6d232a"

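/* Stub out dependencies that are outside the scope of these tests. */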
DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
	    (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_set_id_desc_list_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_free_iocs_specific_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_qpair_abort_all_queued_reqs, (struct spdk_nvme_qpair *qpair));
DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
		struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_io_msg_ctrlr_update, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB(nvme_io_msg_process, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(nvme_transport_ctrlr_reserve_cmb, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_receive, int, (struct spdk_nvme_ctrlr *ctrlr,
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_send, int, (struct spdk_nvme_ctrlr *ctrlr,
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_qpair_abort_queued_reqs, (struct spdk_nvme_qpair *qpair));
DEFINE_STUB(spdk_nvme_qpair_authenticate, int, (struct spdk_nvme_qpair *qpair,
		spdk_nvme_authenticate_cb cb_fn, void *cb_ctx), 0);
DEFINE_STUB(nvme_transport_ctrlr_enable_interrupts, int, (struct spdk_nvme_ctrlr *ctrlr), 0);

int
nvme_get_default_hostnqn(char *buf, int len)
{
	const char *nqn = "nqn.2014-08.org.nvmexpress:uuid:" UT_HOSTID;

	SPDK_CU_ASSERT_FATAL(len >= (int)strlen(nqn));
	memcpy(buf, nqn, strlen(nqn));

	return 0;
}

DEFINE_RETURN_MOCK(nvme_transport_ctrlr_get_memory_domains, int);
int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	HANDLE_RETURN_MOCK(nvme_transport_ctrlr_get_memory_domains);

	return 0;
}

DEFINE_RETURN_MOCK(nvme_transport_ctrlr_ready, int);
int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	HANDLE_RETURN_MOCK(nvme_transport_ctrlr_ready);
	return 0;
}

struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	return NULL;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct_finish(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

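/*
 * Register accessors are serviced from the fake g_ut_nvme_regs register
 * file rather than a real PCI BAR.
 */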
int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	if (g_set_reg_cb) {
		g_set_reg_cb();
	}
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	if (g_set_reg_cb) {
		g_set_reg_cb();
	}
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

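/*
 * The asynchronous register accessors complete synchronously: they go
 * through the fake register file above and invoke the callback immediately
 * with a successful completion.
 */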
int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_set_reg_4(ctrlr, offset, value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_set_reg_8(ctrlr, offset, value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};
	uint32_t value;

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_get_reg_4(ctrlr, offset, &value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};
	uint64_t value;

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_get_reg_8(ctrlr, offset, &value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return UINT32_MAX;
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	return 1;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	qpair->ctrlr = ctrlr;
	qpair->id = qid;
	qpair->qprio = opts->qprio;

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);
}

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
}

int
nvme_driver_init(void)
{
	return 0;
}

int
nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr,
		enum spdk_nvme_qprio qprio,
		uint32_t num_requests, bool async)
{
	qpair->id = id;
	qpair->qprio = qprio;
	qpair->ctrlr = ctrlr;
	qpair->async = async;

	return 0;
}

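/*
 * Most admin command stubs below complete inline through fake_cpl_sc(),
 * which stamps set_status_code into fake_cpl and invokes the callback.
 */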
static struct spdk_nvme_cpl fake_cpl = {};
static enum spdk_nvme_generic_command_status_code set_status_code = SPDK_NVME_SC_SUCCESS;

static void
fake_cpl_sc(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl.status.sc = set_status_code;
	cb_fn(cb_arg, &fake_cpl);
}

static uint32_t g_ut_cdw11;

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	g_ut_cdw11 = cdw11;
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

struct spdk_nvme_ana_page *g_ana_hdr;
struct spdk_nvme_ana_group_descriptor **g_ana_descs;

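/*
 * Log page stub: the ANA log page is assembled from g_ana_hdr and
 * g_ana_descs when they are set, and the Intel log page directory reports
 * every vendor-specific log page as present.
 */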
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) && g_ana_hdr) {
		uint32_t i;
		uint8_t *ptr = payload;

		memset(payload, 0, payload_size);
		memcpy(ptr, g_ana_hdr, sizeof(*g_ana_hdr));
		ptr += sizeof(*g_ana_hdr);
		for (i = 0; i < g_ana_hdr->num_ana_group_desc; ++i) {
			uint32_t desc_size = sizeof(**g_ana_descs) +
					     g_ana_descs[i]->num_of_nsid * sizeof(uint32_t);
			memcpy(ptr, g_ana_descs[i], desc_size);
			ptr += desc_size;
		}
	} else if (log_page == SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY) {
		struct spdk_nvme_intel_log_page_directory *log_page_directory = payload;
		log_page_directory->read_latency_log_len = true;
		log_page_directory->write_latency_log_len = true;
		log_page_directory->temperature_statistics_log_len = true;
		log_page_directory->smart_log_len = true;
		log_page_directory->marketing_description_log_len = true;
	}

	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10, uint32_t cdw11,
				     uint32_t cdw14, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);
	STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);

	/*
	 * For the purposes of this unit test, we don't need to bother emulating request submission.
	 */

	return 0;
}

static int32_t g_wait_for_completion_return_val;

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return g_wait_for_completion_return_val;
}

void
nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status	*status = arg;
	/* This should not happen in the test env, since this callback is always
	 * called before the wait_for_completion_* functions, and timed_out can
	 * only be set to true inside those functions. */
	CU_ASSERT(status->timed_out == false);

	status->cpl = *cpl;
	status->done = true;
}

static struct nvme_completion_poll_status *g_failed_status;

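/*
 * Test shims for the completion polling helpers: completions are reported
 * done immediately, set_status_cpl == 1 injects a status error, and a
 * negative return from spdk_nvme_qpair_process_completions() marks the
 * status as timed out and records it in g_failed_status.
 */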
int
nvme_wait_for_completion_robust_lock_timeout(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex,
	uint64_t timeout_in_usecs)
{
	if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
		g_failed_status = status;
		status->timed_out = true;
		return -1;
	}

	status->done = true;
	if (set_status_cpl == 1) {
		status->cpl.status.sc = 1;
	}
	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
}

int
nvme_wait_for_completion_robust_lock(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, robust_mutex, 0);
}

int
nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			 struct nvme_completion_poll_status *status)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, 0);
}

int
nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				 struct nvme_completion_poll_status *status,
				 uint64_t timeout_in_usecs)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, timeout_in_usecs);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

static uint32_t *g_active_ns_list = NULL;
static uint32_t g_active_ns_list_length = 0;
static struct spdk_nvme_ctrlr_data *g_cdata = NULL;
static bool g_fail_next_identify = false;

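/*
 * Identify stub: g_fail_next_identify forces a one-shot submission failure.
 * Active namespace lists are built either from g_active_ns_list or as
 * 1..cdata.nn, honoring the NSID offset used for paging, and controller
 * data is copied from g_cdata when it is set.
 */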
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (g_fail_next_identify) {
		g_fail_next_identify = false;
		return 1;
	}

	memset(payload, 0, payload_size);
	if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
		uint32_t count = 0;
		uint32_t i = 0;
		struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;

		if (g_active_ns_list == NULL) {
			for (i = 1; i <= ctrlr->cdata.nn; i++) {
				if (i <= nsid) {
					continue;
				}

				ns_list->ns_list[count++] = i;
				if (count == SPDK_COUNTOF(ns_list->ns_list)) {
					break;
				}
			}
		} else {
			for (i = 0; i < g_active_ns_list_length; i++) {
				uint32_t cur_nsid = g_active_ns_list[i];
				if (cur_nsid <= nsid) {
					continue;
				}

				ns_list->ns_list[count++] = cur_nsid;
				if (count == SPDK_COUNTOF(ns_list->ns_list)) {
					break;
				}
			}
		}
	} else if (cns == SPDK_NVME_IDENTIFY_CTRLR) {
		if (g_cdata) {
			memcpy(payload, g_cdata, sizeof(*g_cdata));
		}
	} else if (cns == SPDK_NVME_IDENTIFY_NS_IOCS) {
		return 0;
	}

	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				   uint32_t doper, uint32_t dtype, uint32_t dspec,
				   void *payload, uint32_t payload_size, uint32_t cdw12,
				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				      uint32_t doper, uint32_t dtype, uint32_t dspec,
				      void *payload, uint32_t payload_size, uint32_t cdw12,
				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
	if (fw_commit->fs == 0) {
		return -1;
	}
	set_status_cpl = 1;
	if (ctrlr->is_resetting == true) {
		set_status_cpl = 0;
	}
	return 0;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
		return -1;
	}
	CU_ASSERT(offset == 0);
	return 0;
}

bool
nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns)
{
	switch (ns->csi) {
	case SPDK_NVME_CSI_NVM:
		/*
		 * NVM Command Set Specific Identify Namespace data structure
		 * is currently all-zeroes, reserved for future use.
		 */
		return false;
	case SPDK_NVME_CSI_ZNS:
		return true;
	default:
		SPDK_WARNLOG("Unsupported CSI: %u for NSID: %u\n", ns->csi, ns->id);
		return false;
	}
}

void
nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns)
{
	if (!ns->id) {
		return;
	}

	if (ns->nsdata_zns) {
		spdk_free(ns->nsdata_zns);
		ns->nsdata_zns = NULL;
	}
}

void
nvme_ns_free_nvm_specific_data(struct spdk_nvme_ns *ns)
{
	if (!ns->id) {
		return;
	}

	if (ns->nsdata_nvm) {
		spdk_free(ns->nsdata_nvm);
		ns->nsdata_nvm = NULL;
	}
}

void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

void
spdk_pci_device_detach(struct spdk_pci_device *device)
{
}

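/*
 * Declare a ctrlr and a fake admin qpair on the stack and wire them
 * together. The admin queue gets a single pre-allocated request, and the
 * CUSTOM transport type keeps transport-specific paths out of these tests.
 */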
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;					\
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;

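/*
 * The test_nvme_ctrlr_init_* cases below step nvme_ctrlr_process_init()
 * through the controller enable state machine for each combination of
 * the initial CC.EN and CSTS.RDY register values.
 */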
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_EN_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_SET_EN_0) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Only the default round robin arbitration mechanism is
	 * supported (CAP.AMS = 0)
	 */
	g_ut_nvme_regs.cap.bits.ams = 0x0;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to default round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Weighted round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to weighted round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Vendor specific arbitration mechanism supported
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to vendor specific arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 1
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

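/*
 * Construct a controller in the READY state with num_io_queues free I/O
 * queue IDs. Bit 0 of free_io_qids is cleared because qid 0 is reserved
 * for the admin queue.
 */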
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
	uint32_t i;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);

	ctrlr->page_size = 0x1000;
	ctrlr->opts.num_io_queues = num_io_queues;
	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	ctrlr->state = NVME_CTRLR_STATE_READY;
	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);

	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
}

static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct(ctrlr);
}

static void
test_alloc_io_qpair_rr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0;

	setup_qpairs(&ctrlr, 1);

	/*
	 * Simulate a controller using the default round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	/* Only 1 I/O qpair was allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpair has been returned to the free list,
	 * we should be able to allocate it again.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/* Only qprio 0 is acceptable with the default round robin arbitration mechanism */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 3;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
	opts.qprio = 0;

	/* IO qpair can only be created when ctrlr is in READY state */
	ctrlr.state = NVME_CTRLR_STATE_ENABLE;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);
	ctrlr.state = NVME_CTRLR_STATE_READY;

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1;

	setup_qpairs(&ctrlr, 2);

	/*
	 * Simulate a controller using the weighted round robin
	 * arbitration mechanism.
	 */
	ctrlr.process_init_cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	/*
	 * Allocate 2 qpairs and free them
	 */
	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Allocate 2 qpairs and free them in the reverse order
	 */
	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 2);

	opts.qprio = 3;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_2(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1, *q2, *q3;

	setup_qpairs(&ctrlr, 4);

	/*
	 * Simulate a controller using the weighted round robin
	 * arbitration mechanism.
	 */
	ctrlr.process_init_cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);

	opts.qprio = 2;
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 2);

	opts.qprio = 3;
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/* Only 4 I/O qpairs were allocated, so this should fail */
	opts.qprio = 0;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpairs have been returned to the free list,
	 * we should be able to allocate them again.
	 *
	 * Allocate 4 I/O qpairs, with two pairs sharing the same qprio.
	 */
1749 	opts.qprio = 1;
1750 	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1751 	SPDK_CU_ASSERT_FATAL(q0 != NULL);
1752 	SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
1753 
1754 	opts.qprio = 1;
1755 	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1756 	SPDK_CU_ASSERT_FATAL(q1 != NULL);
1757 	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
1758 
1759 	opts.qprio = 3;
1760 	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1761 	SPDK_CU_ASSERT_FATAL(q2 != NULL);
1762 	SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
1763 
1764 	opts.qprio = 3;
1765 	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1766 	SPDK_CU_ASSERT_FATAL(q3 != NULL);
1767 	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
1768 
1769 	/*
	 * Free all I/O qpairs in allocation order
1771 	 */
1772 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
1773 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
1774 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
1775 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
1776 
1777 	cleanup_qpairs(&ctrlr);
1778 }
1779 
1780 bool g_connect_qpair_called = false;
1781 int g_connect_qpair_return_code = 0;
1782 int
1783 nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1784 {
1785 	g_connect_qpair_called = true;
1786 	qpair->state = NVME_QPAIR_CONNECTED;
1787 	return g_connect_qpair_return_code;
1788 }
1789 
1790 static void
1791 test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
1792 {
1793 	struct spdk_nvme_ctrlr	ctrlr = {};
1794 	struct spdk_nvme_qpair	qpair = {};
1795 	int rc;
1796 
1797 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
1798 
	/* Various controller states block reconnect, each with its own errno:
	 * removed -> -ENODEV, failed -> -ENXIO, resetting -> -EAGAIN. */
1800 	qpair.id = 1;
1801 	qpair.ctrlr = &ctrlr;
1802 	ctrlr.is_removed = 1;
1803 	ctrlr.is_failed = 0;
1804 	ctrlr.is_resetting = 0;
1805 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENODEV);
1807 
1808 	ctrlr.is_removed = 0;
1809 	ctrlr.is_failed = 1;
1810 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENXIO);
1812 
1813 	ctrlr.is_failed = 0;
1814 	ctrlr.is_resetting = 1;
1815 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -EAGAIN);
1817 
1818 	/* Confirm precedence for controller states: removed > resetting > failed */
1819 	ctrlr.is_removed = 1;
1820 	ctrlr.is_failed = 1;
1821 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENODEV);
1823 
1824 	ctrlr.is_removed = 0;
1825 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -EAGAIN);
1827 
1828 	ctrlr.is_resetting = 0;
1829 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENXIO);
1831 
	/* The qpair is connected, so make sure we don't call down to the transport */
1833 	ctrlr.is_failed = 0;
1834 	qpair.state = NVME_QPAIR_CONNECTED;
1835 	g_connect_qpair_called = false;
1836 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1837 	CU_ASSERT(g_connect_qpair_called == false);
	CU_ASSERT(rc == 0);
1839 
	/* The transport qpair is disconnected, so make sure we call down to the transport */
1841 	qpair.state = NVME_QPAIR_DISCONNECTED;
1842 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1843 	CU_ASSERT(g_connect_qpair_called == true);
	CU_ASSERT(rc == 0);
1845 
1846 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
1847 }
1848 
1849 static void
1850 test_nvme_ctrlr_fail(void)
1851 {
1852 	struct spdk_nvme_ctrlr	ctrlr = {};
1853 
1854 	ctrlr.opts.num_io_queues = 0;
1855 	nvme_ctrlr_fail(&ctrlr, false);
1856 
1857 	CU_ASSERT(ctrlr.is_failed == true);
1858 }
1859 
1860 static void
1861 test_nvme_ctrlr_construct_intel_support_log_page_list(void)
1862 {
1863 	bool	res;
1864 	struct spdk_nvme_ctrlr				ctrlr = {};
1865 	struct spdk_nvme_intel_log_page_directory	payload = {};
1866 	struct spdk_pci_id				pci_id = {};
1867 
1868 	/* Get quirks for a device with all 0 vendor/device id */
1869 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1870 	CU_ASSERT(ctrlr.quirks == 0);
1871 
1872 	/* Set the vendor to Intel, but provide no device id */
1873 	pci_id.class_id = SPDK_PCI_CLASS_NVME;
1874 	ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
1875 	payload.temperature_statistics_log_len = 1;
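	/* The Intel log page directory advertises which vendor-specific log pages the
	 * device implements; a nonzero *_log_len field marks a page as present.
	 */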
1876 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1877 	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
1878 
1879 	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
1880 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
1881 	CU_ASSERT(res == true);
1882 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
1883 	CU_ASSERT(res == true);
1884 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
1885 	CU_ASSERT(res == false);
1886 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
1887 	CU_ASSERT(res == false);
1888 
	/* Set a valid vendor ID, device ID, subvendor ID and subdevice ID */
1890 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
1891 	payload.temperature_statistics_log_len = 0;
1892 	pci_id.vendor_id = SPDK_PCI_VID_INTEL;
1893 	pci_id.device_id = 0x0953;
1894 	pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
1895 	pci_id.subdevice_id = 0x3702;
1896 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1897 	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
1898 
1899 	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
1900 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
1901 	CU_ASSERT(res == true);
1902 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
1903 	CU_ASSERT(res == false);
1904 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
1905 	CU_ASSERT(res == true);
1906 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
1907 	CU_ASSERT(res == false);
1908 }
1909 
1910 static void
1911 test_nvme_ctrlr_set_supported_features(void)
1912 {
1913 	bool	res;
1914 	struct spdk_nvme_ctrlr			ctrlr = {};
1915 
	/* Set an invalid vendor ID */
1917 	ctrlr.cdata.vid = 0xFFFF;
1918 	nvme_ctrlr_set_supported_features(&ctrlr);
1919 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
1920 	CU_ASSERT(res == true);
1921 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
1922 	CU_ASSERT(res == false);
1923 
1924 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
1925 	nvme_ctrlr_set_supported_features(&ctrlr);
1926 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
1927 	CU_ASSERT(res == true);
1928 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
1929 	CU_ASSERT(res == true);
1930 }
1931 
1932 static void
1933 test_nvme_ctrlr_set_host_feature(void)
1934 {
1935 	DECLARE_AND_CONSTRUCT_CTRLR();
1936 
1937 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
1938 
1939 	ctrlr.cdata.ctratt.bits.elbas = 0;
1940 	ctrlr.state = NVME_CTRLR_STATE_SET_HOST_FEATURE;
1941 
1942 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1943 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_DB_BUF_CFG);
1944 
1945 	ctrlr.cdata.ctratt.bits.elbas = 1;
1946 	ctrlr.state = NVME_CTRLR_STATE_SET_HOST_FEATURE;
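	/* With elbas set, the driver sends Set Features - Host Behavior Support, so
	 * init passes through an intermediate wait state; loop until the state
	 * machine reaches SET_DB_BUF_CFG.
	 */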
1947 
1948 	while (ctrlr.state != NVME_CTRLR_STATE_SET_DB_BUF_CFG) {
1949 		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1950 	}
1951 
1952 	CU_ASSERT(ctrlr.tmp_ptr == NULL);
1953 	CU_ASSERT(ctrlr.feature_supported[SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT] == true);
1954 
1955 	nvme_ctrlr_destruct(&ctrlr);
1956 }
1957 
1958 static void
1959 test_ctrlr_get_default_ctrlr_opts(void)
1960 {
1961 	struct spdk_nvme_ctrlr_opts opts = {};
1962 
1963 	CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id, UT_HOSTID) == 0);
1964 
1965 	memset(&opts, 0, sizeof(opts));
1966 
	/* Set a smaller opts_size: only fields that fit entirely within it get default values */
1968 	CU_ASSERT(sizeof(opts) > 8);
1969 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
1970 	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
1971 	CU_ASSERT_FALSE(opts.use_cmb_sqs);
	/* Check that the fields below are not initialized to their default values */
1973 	CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
1974 	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
1975 	CU_ASSERT_EQUAL(opts.io_queue_size, 0);
1976 	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
1977 	for (int i = 0; i < 8; i++) {
1978 		CU_ASSERT(opts.host_id[i] == 0);
1979 	}
1980 	for (int i = 0; i < 16; i++) {
1981 		CU_ASSERT(opts.extended_host_id[i] == 0);
1982 	}
1983 	CU_ASSERT(strlen(opts.hostnqn) == 0);
1984 	CU_ASSERT(strlen(opts.src_addr) == 0);
1985 	CU_ASSERT(strlen(opts.src_svcid) == 0);
1986 	CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);
1987 
1988 	/* set a consistent opts_size */
1989 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
1990 	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
1991 	CU_ASSERT_FALSE(opts.use_cmb_sqs);
1992 	CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
1993 	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
1994 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
1995 	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
1996 	for (int i = 0; i < 8; i++) {
1997 		CU_ASSERT(opts.host_id[i] == 0);
1998 	}
1999 	CU_ASSERT_STRING_EQUAL(opts.hostnqn,
2000 			       "nqn.2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
2001 	CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
2002 			 sizeof(opts.extended_host_id)) == 0);
2003 	CU_ASSERT(strlen(opts.src_addr) == 0);
2004 	CU_ASSERT(strlen(opts.src_svcid) == 0);
2005 	CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
2006 }
2007 
2008 static void
2009 test_ctrlr_get_default_io_qpair_opts(void)
2010 {
2011 	struct spdk_nvme_ctrlr ctrlr = {};
2012 	struct spdk_nvme_io_qpair_opts opts = {};
2013 
2014 	memset(&opts, 0, sizeof(opts));
2015 
2016 	/* set a smaller opts_size */
2017 	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
2018 	CU_ASSERT(sizeof(opts) > 8);
2019 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
2020 	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
2021 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
	/* Check that the field below is not initialized to its default value */
2023 	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
2024 
2025 	/* set a consistent opts_size */
2026 	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
2027 	ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
2028 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
2029 	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
2030 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
2031 	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
2032 	CU_ASSERT_EQUAL(opts.delay_cmd_submit, false);
2033 	CU_ASSERT_EQUAL(opts.sq.vaddr, NULL);
2034 	CU_ASSERT_EQUAL(opts.sq.paddr, 0);
2035 	CU_ASSERT_EQUAL(opts.sq.buffer_size, 0);
2036 	CU_ASSERT_EQUAL(opts.cq.vaddr, NULL);
2037 	CU_ASSERT_EQUAL(opts.cq.paddr, 0);
2038 	CU_ASSERT_EQUAL(opts.cq.buffer_size, 0);
2039 	CU_ASSERT_EQUAL(opts.create_only, false);
2040 	CU_ASSERT_EQUAL(opts.async_mode, false);
2041 	CU_ASSERT_EQUAL(opts.disable_pcie_sgl_merge, false);
2042 	CU_ASSERT_EQUAL(opts.opts_size, sizeof(opts));
2043 }
2044 
2045 #if 0 /* TODO: move to PCIe-specific unit test */
2046 static void
2047 test_nvme_ctrlr_alloc_cmb(void)
2048 {
2049 	int			rc;
2050 	uint64_t		offset;
2051 	struct spdk_nvme_ctrlr	ctrlr = {};
2052 
2053 	ctrlr.cmb_size = 0x1000000;
2054 	ctrlr.cmb_current_offset = 0x100;
2055 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
2056 	CU_ASSERT(rc == 0);
2057 	CU_ASSERT(offset == 0x1000);
2058 	CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);
2059 
2060 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
2061 	CU_ASSERT(rc == 0);
2062 	CU_ASSERT(offset == 0x2000);
2063 	CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);
2064 
2065 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
2066 	CU_ASSERT(rc == 0);
2067 	CU_ASSERT(offset == 0x100000);
2068 	CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);
2069 
2070 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
2071 	CU_ASSERT(rc == -1);
2072 }
2073 #endif
2074 
2075 static void
2076 test_spdk_nvme_ctrlr_update_firmware(void)
2077 {
2078 	struct spdk_nvme_ctrlr ctrlr = {};
2079 	void *payload = NULL;
2080 	int point_payload = 1;
2081 	int slot = 0;
2082 	int ret = 0;
2083 	struct spdk_nvme_status status;
2084 	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;
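
	/* spdk_nvme_ctrlr_update_firmware() downloads the image in min_page_size
	 * chunks and then issues a firmware commit; the cases below probe each
	 * failure point along that sequence before the final success path.
	 */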
2085 
2086 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
2087 
	/* An image size that is not a multiple of 4 bytes fails the size check */
2089 	set_size = 5;
2090 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2091 	CU_ASSERT(ret == -1);
2092 
	/* Payload is NULL and set_size < min_page_size */
2094 	set_size = 4;
2095 	ctrlr.min_page_size = 5;
2096 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2097 	CU_ASSERT(ret == -1);
2098 
	/* Payload is not NULL, but min_page_size is 0 */
2100 	set_size = 4;
2101 	ctrlr.min_page_size = 0;
2102 	payload = &point_payload;
2103 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2104 	CU_ASSERT(ret == -1);
2105 
	/* Check firmware image download when payload is not NULL and min_page_size is not 0; status.cpl value is 1 */
2107 	set_status_cpl = 1;
2108 	set_size = 4;
2109 	ctrlr.min_page_size = 5;
2110 	payload = &point_payload;
2111 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2112 	CU_ASSERT(ret == -ENXIO);
2113 
	/* Check firmware image download with status.cpl set to 0 */
2115 	set_status_cpl = 0;
2116 	set_size = 4;
2117 	ctrlr.min_page_size = 5;
2118 	payload = &point_payload;
2119 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2120 	CU_ASSERT(ret == -1);
2121 
2122 	/* Check firmware commit */
2123 	ctrlr.is_resetting = false;
2124 	set_status_cpl = 0;
2125 	slot = 1;
2126 	set_size = 4;
2127 	ctrlr.min_page_size = 5;
2128 	payload = &point_payload;
2129 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2130 	CU_ASSERT(ret == -ENXIO);
2131 
	/* Size check, firmware download and firmware commit all succeed */
2133 	ctrlr.is_resetting = true;
2134 	set_status_cpl = 0;
2135 	slot = 1;
2136 	set_size = 4;
2137 	ctrlr.min_page_size = 5;
2138 	payload = &point_payload;
2139 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2140 	CU_ASSERT(ret == 0);
2141 
2142 	/* nvme_wait_for_completion returns an error */
2143 	g_wait_for_completion_return_val = -1;
2144 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2145 	CU_ASSERT(ret == -ENXIO);
2146 	CU_ASSERT(g_failed_status != NULL);
2147 	CU_ASSERT(g_failed_status->timed_out == true);
	/* The status would normally be freed by the completion callback, which is not
	   triggered in the test env. Store the status in a global variable and free it
	   manually. If spdk_nvme_ctrlr_update_firmware changes its behavior and frees
	   the status itself, we'll get a double free here. */
2152 	free(g_failed_status);
2153 	g_failed_status = NULL;
2154 	g_wait_for_completion_return_val = 0;
2155 
2156 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
2157 	set_status_cpl = 0;
2158 }
2159 
2160 int
2161 nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
2162 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
2163 {
2164 	fake_cpl_sc(cb_fn, cb_arg);
2165 	return 0;
2166 }
2167 
2168 static void
2169 test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
2170 {
2171 	struct spdk_nvme_ctrlr ctrlr = {};
2172 	int ret = -1;
2173 
2174 	ctrlr.cdata.oacs.doorbell_buffer_config = 1;
2175 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2176 	ctrlr.page_size = 0x1000;
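	/* Setting the doorbell buffer config allocates two page-sized buffers (the
	 * shadow doorbells and EventIdx) and sends the Doorbell Buffer Config admin
	 * command, which the stub above completes successfully.
	 */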
2177 	MOCK_CLEAR(spdk_malloc);
2178 	MOCK_CLEAR(spdk_zmalloc);
2179 	ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
2180 	CU_ASSERT(ret == 0);
2181 	nvme_ctrlr_free_doorbell_buffer(&ctrlr);
2182 }
2183 
2184 static void
2185 test_nvme_ctrlr_test_active_ns(void)
2186 {
2187 	uint32_t		nsid, minor;
2188 	size_t			ns_id_count;
2189 	struct spdk_nvme_ctrlr	ctrlr = {};
2190 	uint32_t		active_ns_list[1531];
2191 
2192 	for (nsid = 1; nsid <= 1531; nsid++) {
2193 		active_ns_list[nsid - 1] = nsid;
2194 	}
2195 
2196 	g_active_ns_list = active_ns_list;
2197 
2198 	ctrlr.page_size = 0x1000;
2199 
2200 	for (minor = 0; minor <= 2; minor++) {
2201 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2202 		ctrlr.state = NVME_CTRLR_STATE_READY;
2203 
2204 		ctrlr.vs.bits.mjr = 1;
2205 		ctrlr.vs.bits.mnr = minor;
2206 		ctrlr.vs.bits.ter = 0;
2207 		ctrlr.cdata.nn = 1531;
2208 
2209 		RB_INIT(&ctrlr.ns);
2210 
2211 		g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2212 		nvme_ctrlr_identify_active_ns(&ctrlr);
2213 
2214 		for (nsid = 1; nsid <= ctrlr.cdata.nn; nsid++) {
2215 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
2216 		}
2217 
2218 		for (; nsid <= 1559; nsid++) {
2219 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
2220 		}
2221 
2222 		g_active_ns_list_length = 0;
2223 		if (minor <= 1) {
2224 			ctrlr.cdata.nn = 0;
2225 		}
2226 		nvme_ctrlr_identify_active_ns(&ctrlr);
2227 		CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
2228 
2229 		g_active_ns_list_length = 1;
2230 		if (minor <= 1) {
2231 			ctrlr.cdata.nn = 1;
2232 		}
2233 		nvme_ctrlr_identify_active_ns(&ctrlr);
2234 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
2235 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
2236 		nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
2237 		CU_ASSERT(nsid == 1);
2238 
2239 		if (minor >= 2) {
2240 			/* For NVMe 1.2 and newer, the namespace list can have "holes" where
2241 			 * some namespaces are not active. Test this. */
2242 			g_active_ns_list_length = 2;
2243 			g_active_ns_list[1] = 3;
2244 			nvme_ctrlr_identify_active_ns(&ctrlr);
2245 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
2246 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
2247 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
2248 			nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
2249 			CU_ASSERT(nsid == 3);
2250 			nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
2251 			CU_ASSERT(nsid == 0);
2252 
2253 			/* Reset the active namespace list array */
2254 			g_active_ns_list[1] = 2;
2255 		}
2256 
2257 		g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2258 		if (minor <= 1) {
2259 			ctrlr.cdata.nn = 1531;
2260 		}
2261 		nvme_ctrlr_identify_active_ns(&ctrlr);
2262 
2263 		ns_id_count = 0;
2264 		for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
2265 		     nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
2266 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
2267 			ns_id_count++;
2268 		}
2269 		CU_ASSERT(ns_id_count == ctrlr.cdata.nn);
2270 
2271 		nvme_ctrlr_destruct(&ctrlr);
2272 	}
2273 
2274 	g_active_ns_list = NULL;
2275 	g_active_ns_list_length = 0;
2276 }
2277 
2278 static void
2279 test_nvme_ctrlr_test_active_ns_error_case(void)
2280 {
2281 	int rc;
2282 	struct spdk_nvme_ctrlr	ctrlr = {.state = NVME_CTRLR_STATE_READY};
2283 
2284 	ctrlr.page_size = 0x1000;
2285 	ctrlr.vs.bits.mjr = 1;
2286 	ctrlr.vs.bits.mnr = 2;
2287 	ctrlr.vs.bits.ter = 0;
2288 	ctrlr.cdata.nn = 2;
2289 
2290 	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
2291 	rc = nvme_ctrlr_identify_active_ns(&ctrlr);
2292 	CU_ASSERT(rc == -ENXIO);
2293 	set_status_code = SPDK_NVME_SC_SUCCESS;
2294 }
2295 
2296 static void
2297 test_nvme_ctrlr_init_delay(void)
2298 {
2299 	DECLARE_AND_CONSTRUCT_CTRLR();
2300 
2301 	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
2302 
2303 	/*
2304 	 * Initial state: CC.EN = 0, CSTS.RDY = 0
2305 	 * init() should set CC.EN = 1.
2306 	 */
2307 	g_ut_nvme_regs.cc.bits.en = 0;
2308 	g_ut_nvme_regs.csts.bits.rdy = 0;
2309 
2310 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2311 	/* Test that the initialization delay works correctly.  We only
2312 	 * do the initialization delay on SSDs that require it, so
2313 	 * set that quirk here.
2314 	 */
2315 	ctrlr.quirks = NVME_QUIRK_DELAY_BEFORE_INIT;
2316 	ctrlr.cdata.nn = 1;
2317 	ctrlr.page_size = 0x1000;
2318 	ctrlr.state = NVME_CTRLR_STATE_INIT_DELAY;
2319 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2320 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
2321 	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
2322 
	/* Delay 1s; process_init just returns because the sleep timeout hasn't elapsed yet */
2324 	spdk_delay_us(1 * spdk_get_ticks_hz());
2325 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2326 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
2327 	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
2328 
	/* The sleep timeout has elapsed, so initialization starts */
2330 	spdk_delay_us(2 * spdk_get_ticks_hz());
2331 	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
2332 		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2333 	}
2334 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2335 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
2336 
2337 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2338 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
2339 
2340 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2341 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
2342 
2343 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2344 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
2345 	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
2346 
2347 	/*
2348 	 * Transition to CSTS.RDY = 1.
2349 	 */
2350 	g_ut_nvme_regs.csts.bits.rdy = 1;
2351 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2352 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
2353 
2354 	/*
2355 	 * Transition to READY.
2356 	 */
2357 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2358 		nvme_ctrlr_process_init(&ctrlr);
2359 	}
2360 
2361 	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
2362 	nvme_ctrlr_destruct(&ctrlr);
2363 }
2364 
2365 static void
2366 test_spdk_nvme_ctrlr_set_trid(void)
2367 {
2368 	struct spdk_nvme_ctrlr ctrlr = {{0}};
2369 	struct spdk_nvme_transport_id new_trid = {{0}};
2370 
2371 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
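	/* spdk_nvme_ctrlr_set_trid() only allows changing the address of a failed
	 * controller, and the transport type and subsystem NQN must stay the same.
	 */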
2372 
2373 	ctrlr.is_failed = false;
2374 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2375 	snprintf(ctrlr.trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
2376 	snprintf(ctrlr.trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
2377 	snprintf(ctrlr.trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
2378 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EPERM);
2379 
2380 	ctrlr.is_failed = true;
2381 	new_trid.trtype = SPDK_NVME_TRANSPORT_TCP;
2382 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
2383 	CU_ASSERT(ctrlr.trid.trtype == SPDK_NVME_TRANSPORT_RDMA);
2384 
2385 	new_trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2386 	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode2");
2387 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
2388 	CU_ASSERT(strncmp(ctrlr.trid.subnqn, "nqn.2016-06.io.spdk:cnode1", SPDK_NVMF_NQN_MAX_LEN) == 0);
2389 
2390 
2391 	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
2392 	snprintf(new_trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
2393 	snprintf(new_trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4421");
2394 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == 0);
2395 	CU_ASSERT(strncmp(ctrlr.trid.traddr, "192.168.100.9", SPDK_NVMF_TRADDR_MAX_LEN) == 0);
2396 	CU_ASSERT(strncmp(ctrlr.trid.trsvcid, "4421", SPDK_NVMF_TRSVCID_MAX_LEN) == 0);
2397 
2398 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
2399 }
2400 
2401 static void
2402 test_nvme_ctrlr_init_set_nvmf_ioccsz(void)
2403 {
2404 	struct spdk_nvme_ctrlr_data cdata = {};
2405 	DECLARE_AND_CONSTRUCT_CTRLR();
	/* ioccsz is in 16-byte units: 260 * 16 = 4160 bytes, and subtracting the
	 * 64-byte SQE leaves 4096 bytes of in-capsule data */
2407 	cdata.nvmf_specific.ioccsz = 260;
2408 	cdata.nvmf_specific.icdoff = 1;
2409 	g_cdata = &cdata;
2410 
	/* Check PCIe trtype. */
2412 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2413 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2414 
2415 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2416 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2417 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2418 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2419 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2420 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2421 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2422 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2423 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2424 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2425 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2426 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2427 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
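
	/* PCIe does not use in-capsule data, so ioccsz_bytes and icdoff stay zero
	 * here; the CUSTOM transport below behaves the same way.
	 */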
2428 
2429 	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
2430 	CU_ASSERT(ctrlr.icdoff == 0);
2431 
2432 	nvme_ctrlr_destruct(&ctrlr);
2433 
	/* Check RDMA trtype. */
2435 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2436 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2437 
2438 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2439 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2440 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2441 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2442 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2443 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2444 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2445 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2446 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2447 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2448 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2449 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2450 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2451 
2452 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2453 	CU_ASSERT(ctrlr.icdoff == 1);
2454 	ctrlr.ioccsz_bytes = 0;
2455 	ctrlr.icdoff = 0;
2456 
2457 	nvme_ctrlr_destruct(&ctrlr);
2458 
	/* Check TCP trtype. */
2460 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2461 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
2462 
2463 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2464 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2465 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2466 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2467 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2468 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2469 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2470 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2471 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2472 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2473 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2474 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2475 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2476 
2477 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2478 	CU_ASSERT(ctrlr.icdoff == 1);
2479 	ctrlr.ioccsz_bytes = 0;
2480 	ctrlr.icdoff = 0;
2481 
2482 	nvme_ctrlr_destruct(&ctrlr);
2483 
	/* Check FC trtype. */
2485 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2486 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_FC;
2487 
2488 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2489 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2490 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2491 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2492 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2493 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2494 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2495 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2496 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2497 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2498 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2499 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2500 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2501 
2502 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2503 	CU_ASSERT(ctrlr.icdoff == 1);
2504 	ctrlr.ioccsz_bytes = 0;
2505 	ctrlr.icdoff = 0;
2506 
2507 	nvme_ctrlr_destruct(&ctrlr);
2508 
	/* Check CUSTOM trtype. */
2510 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2511 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;
2512 
2513 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2514 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2515 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2516 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2517 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2518 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2519 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2520 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2521 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2522 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2523 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2524 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2525 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2526 
2527 	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
2528 	CU_ASSERT(ctrlr.icdoff == 0);
2529 
2530 	nvme_ctrlr_destruct(&ctrlr);
2531 
	/* Check CUSTOM_FABRICS trtype. */
2533 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2534 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM_FABRICS;
2535 
2536 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2537 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2538 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2539 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2540 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2541 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2542 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2543 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2544 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2545 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2546 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2547 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2548 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2549 
2550 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2551 	CU_ASSERT(ctrlr.icdoff == 1);
2552 	ctrlr.ioccsz_bytes = 0;
2553 	ctrlr.icdoff = 0;
2554 
2555 	nvme_ctrlr_destruct(&ctrlr);
2556 
2557 	g_cdata = NULL;
2558 }
2559 
2560 static void
2561 test_nvme_ctrlr_init_set_num_queues(void)
2562 {
2563 	DECLARE_AND_CONSTRUCT_CTRLR();
2564 
2565 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2566 
2567 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2568 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2569 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2570 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2571 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2572 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2573 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2574 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2575 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2576 
2577 	ctrlr.opts.num_io_queues = 64;
	/* Queue counts are zero-based, so use 31 to get 32 queues */
2579 	fake_cpl.cdw0 = 31 + (31 << 16);
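	/* cdw0 of the Set Features (Number of Queues) completion packs NSQA in bits
	 * 15:0 and NCQA in bits 31:16, both zero-based; the driver clamps
	 * num_io_queues to the minimum of the requested and granted counts.
	 */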
2580 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_ACTIVE_NS */
2581 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2582 	CU_ASSERT(ctrlr.opts.num_io_queues == 32);
2583 	fake_cpl.cdw0 = 0;
2584 
2585 	nvme_ctrlr_destruct(&ctrlr);
2586 }
2587 
2588 static void
2589 test_nvme_ctrlr_init_set_keep_alive_timeout(void)
2590 {
2591 	DECLARE_AND_CONSTRUCT_CTRLR();
2592 
2593 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2594 
2595 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2596 	ctrlr.cdata.kas = 1;
2597 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2598 	fake_cpl.cdw0 = 120000;
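	/* Get Features (Keep Alive Timer) reports the granted timeout in ms in cdw0;
	 * the driver adopts the controller's value when it differs from the request.
	 */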
2599 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_IOCS_SPECIFIC */
2600 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2601 	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 120000);
2602 	fake_cpl.cdw0 = 0;
2603 
2604 	/* Target does not support Get Feature "Keep Alive Timer" */
2605 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2606 	ctrlr.cdata.kas = 1;
2607 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2608 	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
2609 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_IOCS_SPECIFIC */
2610 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2611 	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 60000);
2612 	set_status_code = SPDK_NVME_SC_SUCCESS;
2613 
2614 	/* Target fails Get Feature "Keep Alive Timer" for another reason */
2615 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2616 	ctrlr.cdata.kas = 1;
2617 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2618 	set_status_code = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2619 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> ERROR */
2620 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
2621 	set_status_code = SPDK_NVME_SC_SUCCESS;
2622 
2623 	nvme_ctrlr_destruct(&ctrlr);
2624 }
2625 
2626 static void
2627 test_alloc_io_qpair_fail(void)
2628 {
2629 	struct spdk_nvme_ctrlr ctrlr = {};
2630 	struct spdk_nvme_qpair *q0;
2631 
2632 	setup_qpairs(&ctrlr, 1);
2633 
2634 	/* Modify the connect_qpair return code to inject a failure */
2635 	g_connect_qpair_return_code = 1;
2636 
	/* Attempt to allocate a qpair; this should fail */
2638 	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
2639 	SPDK_CU_ASSERT_FATAL(q0 == NULL);
2640 
2641 	/* Verify that the qpair is removed from the lists */
2642 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.active_io_qpairs));
2643 
2644 	g_connect_qpair_return_code = 0;
2645 	cleanup_qpairs(&ctrlr);
2646 }
2647 
2648 static void
2649 test_nvme_ctrlr_add_remove_process(void)
2650 {
2651 	struct spdk_nvme_ctrlr ctrlr = {};
2652 	void *devhandle = (void *)0xDEADBEEF;
2653 	struct spdk_nvme_ctrlr_process *proc = NULL;
2654 	int rc;
2655 
2656 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2657 	TAILQ_INIT(&ctrlr.active_procs);
2658 
2659 	rc = nvme_ctrlr_add_process(&ctrlr, devhandle);
2660 	CU_ASSERT(rc == 0);
2661 	proc = TAILQ_FIRST(&ctrlr.active_procs);
2662 	SPDK_CU_ASSERT_FATAL(proc != NULL);
2663 	CU_ASSERT(proc->is_primary == true);
2664 	CU_ASSERT(proc->pid == getpid());
2665 	CU_ASSERT(proc->devhandle == (void *)0xDEADBEEF);
2666 	CU_ASSERT(proc->ref == 0);
2667 
2668 	nvme_ctrlr_remove_process(&ctrlr, proc);
2669 	CU_ASSERT(TAILQ_EMPTY(&ctrlr.active_procs));
2670 }
2671 
2672 static void
2673 test_nvme_ctrlr_set_arbitration_feature(void)
2674 {
2675 	struct spdk_nvme_ctrlr ctrlr = {};
2676 
2677 	ctrlr.opts.arbitration_burst = 6;
2678 	ctrlr.flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
2679 	ctrlr.opts.low_priority_weight = 1;
2680 	ctrlr.opts.medium_priority_weight = 2;
2681 	ctrlr.opts.high_priority_weight = 3;
	/* g_ut_cdw11 records the cdw11 value of the Set Features command. */
2683 	g_ut_cdw11 = 0;
2684 
	/* arbitration_burst is within the valid range. */
2686 	nvme_ctrlr_set_arbitration_feature(&ctrlr);
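	/* cdw11 of the Arbitration feature packs AB in bits 2:0, LPW in bits 15:8,
	 * MPW in bits 23:16 and HPW in bits 31:24, which the byte shifts below decode.
	 */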
2687 	CU_ASSERT((uint8_t)g_ut_cdw11 == 6);
2688 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 8) == 1);
2689 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 16) == 2);
2690 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 24) == 3);
2691 
	/* arbitration_burst is out of range (maximum is 7), so the feature is not set. */
2693 	g_ut_cdw11 = 0;
2694 	ctrlr.opts.arbitration_burst = 8;
2695 
2696 	nvme_ctrlr_set_arbitration_feature(&ctrlr);
2697 	CU_ASSERT(g_ut_cdw11 == 0);
2698 }
2699 
2700 static void
2701 test_nvme_ctrlr_set_state(void)
2702 {
2703 	struct spdk_nvme_ctrlr ctrlr = {};
2704 	MOCK_SET(spdk_get_ticks, 0);
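	/* state_timeout_tsc is now + timeout_ms * ticks_hz / 1000; with the stubbed
	 * tick source here, 1000 ms converts to 1000000 ticks.
	 */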
2705 
2706 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 1000);
2707 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2708 	CU_ASSERT(ctrlr.state_timeout_tsc == 1000000);
2709 
2710 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 0);
2711 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2712 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
2713 
2714 	/* Time out ticks causes integer overflow. */
2715 	MOCK_SET(spdk_get_ticks, UINT64_MAX);
2716 
2717 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 1000);
2718 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2719 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
2720 	MOCK_CLEAR(spdk_get_ticks);
2721 }
2722 
2723 static void
2724 test_nvme_ctrlr_active_ns_list_v0(void)
2725 {
2726 	DECLARE_AND_CONSTRUCT_CTRLR();
2727 
2728 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2729 
2730 	ctrlr.vs.bits.mjr = 1;
2731 	ctrlr.vs.bits.mnr = 0;
2732 	ctrlr.vs.bits.ter = 0;
2733 	ctrlr.cdata.nn = 1024;
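	/* NVMe 1.0 predates the Identify Active Namespace List command, so the
	 * driver reports namespaces 1 through cdata.nn as active.
	 */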
2734 
2735 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2736 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2737 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2738 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2739 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2740 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2741 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2742 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 1024);
2743 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2744 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2745 
2746 	nvme_ctrlr_destruct(&ctrlr);
2747 }
2748 
2749 static void
2750 test_nvme_ctrlr_active_ns_list_v2(void)
2751 {
2752 	uint32_t i;
2753 	uint32_t active_ns_list[1024];
2754 	DECLARE_AND_CONSTRUCT_CTRLR();
2755 
2756 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2757 
2758 	ctrlr.vs.bits.mjr = 1;
2759 	ctrlr.vs.bits.mnr = 2;
2760 	ctrlr.vs.bits.ter = 0;
2761 	ctrlr.cdata.nn = 4096;
2762 
2763 	g_active_ns_list = active_ns_list;
2764 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2765 
2766 	/* No active namespaces */
2767 	memset(active_ns_list, 0, sizeof(active_ns_list));
2768 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2769 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2770 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2771 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2772 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2773 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2774 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
2775 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2776 
2777 	nvme_ctrlr_destruct(&ctrlr);
2778 
2779 	/* 1024 active namespaces - one full page */
2780 	memset(active_ns_list, 0, sizeof(active_ns_list));
2781 	for (i = 0; i < 1024; ++i) {
2782 		active_ns_list[i] = i + 1;
2783 	}
2784 
2785 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2786 
2787 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2788 	g_active_ns_list = active_ns_list;
2789 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2790 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2791 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2792 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2793 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2794 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2795 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2796 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 1024);
2797 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2798 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2799 
2800 	nvme_ctrlr_destruct(&ctrlr);
2801 
	/* 1023 active namespaces - a full page minus one */
2803 	memset(active_ns_list, 0, sizeof(active_ns_list));
2804 	for (i = 0; i < 1023; ++i) {
2805 		active_ns_list[i] = i + 1;
2806 	}
2807 
2808 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2809 
2810 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2811 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2812 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2813 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2814 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1023));
2815 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2816 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2817 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2818 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 0);
2819 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2820 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2821 
2822 	nvme_ctrlr_destruct(&ctrlr);
2823 
2824 	g_active_ns_list = NULL;
2825 	g_active_ns_list_length = 0;
2826 }
2827 
2828 static void
2829 test_nvme_ctrlr_ns_mgmt(void)
2830 {
2831 	DECLARE_AND_CONSTRUCT_CTRLR();
2832 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2833 	uint32_t active_ns_list2[] = { 1, 2, 3, 100, 1024 };
2834 	struct spdk_nvme_ns_data nsdata = {};
2835 	struct spdk_nvme_ctrlr_list ctrlr_list = {};
2836 	uint32_t nsid;
2837 
2838 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2839 
2840 	ctrlr.vs.bits.mjr = 1;
2841 	ctrlr.vs.bits.mnr = 2;
2842 	ctrlr.vs.bits.ter = 0;
2843 	ctrlr.cdata.nn = 4096;
2844 
2845 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2846 	g_active_ns_list = active_ns_list;
2847 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2848 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2849 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2850 	}
2851 
2852 	fake_cpl.cdw0 = 3;
2853 	nsid = spdk_nvme_ctrlr_create_ns(&ctrlr, &nsdata);
2854 	fake_cpl.cdw0 = 0;
2855 	CU_ASSERT(nsid == 3);
2856 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2857 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2858 
2859 	g_active_ns_list = active_ns_list2;
2860 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
2861 	CU_ASSERT(spdk_nvme_ctrlr_attach_ns(&ctrlr, 3, &ctrlr_list) == 0);
2862 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2863 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2864 
2865 	g_active_ns_list = active_ns_list;
2866 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2867 	CU_ASSERT(spdk_nvme_ctrlr_detach_ns(&ctrlr, 3, &ctrlr_list) == 0);
2868 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2869 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2870 
2871 	CU_ASSERT(spdk_nvme_ctrlr_delete_ns(&ctrlr, 3) == 0);
2872 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2873 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2874 	g_active_ns_list = NULL;
2875 	g_active_ns_list_length = 0;
2876 
2877 	nvme_ctrlr_destruct(&ctrlr);
2878 }
2879 
2880 static void
2881 check_en_set_rdy(void)
2882 {
2883 	if (g_ut_nvme_regs.cc.bits.en == 1) {
2884 		g_ut_nvme_regs.csts.bits.rdy = 1;
2885 	}
2886 }
2887 
2888 static void
2889 test_nvme_ctrlr_reset(void)
2890 {
2891 	DECLARE_AND_CONSTRUCT_CTRLR();
2892 	struct spdk_nvme_ctrlr_data cdata = { .nn = 4096 };
2893 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2894 	uint32_t active_ns_list2[] = { 1, 100, 1024 };
2895 
2896 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2897 
2898 	g_ut_nvme_regs.vs.bits.mjr = 1;
2899 	g_ut_nvme_regs.vs.bits.mnr = 2;
2900 	g_ut_nvme_regs.vs.bits.ter = 0;
2901 	nvme_ctrlr_get_vs(&ctrlr, &ctrlr.vs);
2902 	ctrlr.cdata.nn = 2048;
2903 
2904 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2905 	g_active_ns_list = active_ns_list;
2906 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2907 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2908 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2909 	}
2910 	CU_ASSERT(spdk_nvme_ctrlr_get_num_ns(&ctrlr) == 2048);
2911 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 2) != NULL);
2912 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2));
2913 
2914 	/* Reset controller with changed number of namespaces */
2915 	g_cdata = &cdata;
2916 	g_active_ns_list = active_ns_list2;
2917 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
2918 	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);
2919 	g_ut_nvme_regs.cc.raw = 0;
2920 	g_ut_nvme_regs.csts.raw = 0;
2921 	g_set_reg_cb = check_en_set_rdy;
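	/* With CSTS.RDY tracking CC.EN through the callback, the enable/disable
	 * handshake inside spdk_nvme_ctrlr_reset() can complete without a device.
	 */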
2922 	g_wait_for_completion_return_val = -ENXIO;
2923 	CU_ASSERT(spdk_nvme_ctrlr_reset(&ctrlr) == 0);
2924 	g_set_reg_cb = NULL;
2925 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);
2926 	g_cdata = NULL;
2927 	g_active_ns_list = NULL;
2928 	g_active_ns_list_length = 0;
2929 
2930 	CU_ASSERT(spdk_nvme_ctrlr_get_num_ns(&ctrlr) == 4096);
2931 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 2) != NULL);
2932 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2));
2933 
2934 	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
2935 	nvme_ctrlr_destruct(&ctrlr);
2936 
2937 	g_wait_for_completion_return_val = 0;
2938 }
2939 
2940 static uint32_t g_aer_cb_counter;
2941 
2942 static void
2943 aer_cb(void *aer_cb_arg, const struct spdk_nvme_cpl *cpl)
2944 {
2945 	g_aer_cb_counter++;
2946 }
2947 
2948 static void
2949 test_nvme_ctrlr_aer_callback(void)
2950 {
2951 	DECLARE_AND_CONSTRUCT_CTRLR();
2952 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2953 	union spdk_nvme_async_event_completion	aer_event = {
2954 		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
2955 		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
2956 	};
2957 	struct spdk_nvme_cpl aer_cpl = {
2958 		.status.sct = SPDK_NVME_SCT_GENERIC,
2959 		.status.sc = SPDK_NVME_SC_SUCCESS,
2960 		.cdw0 = aer_event.raw
2961 	};
2962 
2963 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2964 
2965 	ctrlr.vs.bits.mjr = 1;
2966 	ctrlr.vs.bits.mnr = 2;
2967 	ctrlr.vs.bits.ter = 0;
2968 	ctrlr.cdata.nn = 4096;
2969 
2970 	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
2971 	g_active_ns_list = active_ns_list;
2972 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2973 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2974 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2975 	}
2976 
2977 	CU_ASSERT(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);
2978 	spdk_nvme_ctrlr_register_aer_callback(&ctrlr, aer_cb, NULL);
2979 
2980 	/* Async event */
2981 	g_aer_cb_counter = 0;
2982 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
2983 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
2984 	CU_ASSERT(g_aer_cb_counter == 1);
2985 	g_active_ns_list = NULL;
2986 	g_active_ns_list_length = 0;
2987 
2988 	nvme_ctrlr_free_processes(&ctrlr);
2989 	nvme_ctrlr_destruct(&ctrlr);
2990 }
2991 
2992 static void
2993 test_nvme_ctrlr_ns_attr_changed(void)
2994 {
2995 	DECLARE_AND_CONSTRUCT_CTRLR();
2996 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2997 	uint32_t active_ns_list2[] = { 1, 2, 1024 };
2998 	uint32_t active_ns_list3[] = { 1, 2, 101, 1024 };
2999 	union spdk_nvme_async_event_completion	aer_event = {
3000 		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
3001 		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
3002 	};
3003 	struct spdk_nvme_cpl aer_cpl = {
3004 		.status.sct = SPDK_NVME_SCT_GENERIC,
3005 		.status.sc = SPDK_NVME_SC_SUCCESS,
3006 		.cdw0 = aer_event.raw
3007 	};
3008 
3009 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
3010 
3011 	ctrlr.vs.bits.mjr = 1;
3012 	ctrlr.vs.bits.mnr = 3;
3013 	ctrlr.vs.bits.ter = 0;
3014 	ctrlr.cap.bits.css |= SPDK_NVME_CAP_CSS_IOCS;
3015 	ctrlr.cdata.nn = 4096;
3016 
3017 	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
3018 	g_active_ns_list = active_ns_list;
3019 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
3020 
3021 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
3022 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
3023 	}
3024 
3025 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 100));
3026 
3027 	CU_ASSERT(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);
3028 	spdk_nvme_ctrlr_register_aer_callback(&ctrlr, aer_cb, NULL);
3029 
3030 	/* Remove NS 100 */
3031 	g_aer_cb_counter = 0;
3032 	g_active_ns_list = active_ns_list2;
3033 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
3034 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
3035 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
3036 	CU_ASSERT(g_aer_cb_counter == 1);
3037 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 100));
3038 
3039 	/* Add NS 101 */
3040 	g_active_ns_list = active_ns_list3;
3041 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list3);
3042 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
3043 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
3044 	CU_ASSERT(g_aer_cb_counter == 2);
3045 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 101));
3046 
3047 	g_active_ns_list = NULL;
3048 	g_active_ns_list_length = 0;
3049 	nvme_ctrlr_free_processes(&ctrlr);
3050 	nvme_ctrlr_destruct(&ctrlr);
3051 }
3052 
3053 static void
3054 test_nvme_ctrlr_identify_namespaces_iocs_specific_next(void)
3055 {
3056 	struct spdk_nvme_ctrlr ctrlr = {};
3057 	uint32_t prev_nsid;
3058 	struct spdk_nvme_ns ns[5] = {};
3059 	struct spdk_nvme_ctrlr ns_ctrlr[5] = {};
3060 	int rc = 0;
3061 	int i;
3062 
3063 	RB_INIT(&ctrlr.ns);
3064 	for (i = 0; i < 5; i++) {
3065 		ns[i].id = i + 1;
3066 		ns[i].active = true;
3067 	}
3068 
3069 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
3070 
3071 	ctrlr.cdata.nn = 5;
3072 	/* case 1: No first/next active NS, move on to the next state, expect: pass */
3073 	prev_nsid = 0;
3074 	ctrlr.active_ns_count = 0;
3075 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3076 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3077 	CU_ASSERT(rc == 0);
3078 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES);
3079 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3080 
	/* case 2: move on to the next active NS; no namespace with (supported) IOCS-specific data is found, expect: pass */
3082 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3083 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3084 	prev_nsid = 1;
3085 	for (i = 0; i < 5; i++) {
3086 		RB_INSERT(nvme_ns_tree, &ctrlr.ns, &ns[i]);
3087 	}
3088 	ctrlr.active_ns_count = 5;
3089 	ns[1].csi = SPDK_NVME_CSI_NVM;
3090 	ns[1].id = 2;
3091 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3092 	CU_ASSERT(rc == 0);
3093 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES);
3094 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3095 
3096 	/* case 3: ns.csi is SPDK_NVME_CSI_ZNS, do not loop, expect: pass */
3097 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3098 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3099 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3100 	prev_nsid = 0;
3101 	ctrlr.active_ns_count = 5;
3102 
3103 	for (int i = 0; i < 5; i++) {
3104 		ns[i].csi = SPDK_NVME_CSI_NVM;
3105 		ns[i].id = i + 1;
3106 		ns[i].ctrlr = &ns_ctrlr[i];
3107 	}
3108 	ns[4].csi = SPDK_NVME_CSI_ZNS;
3109 	ns_ctrlr[4].opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
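	/* Only the ZNS namespace carries IOCS-specific identify data here, so the
	 * scan skips the NVM namespaces and starts the async identify on ns[4].
	 */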
3110 
3111 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3112 	CU_ASSERT(rc == 0);
3113 	CU_ASSERT(ctrlr.state == 0);
3114 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3115 	CU_ASSERT(ns_ctrlr[4].state == NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC);
3116 	CU_ASSERT(ns_ctrlr[4].state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3117 
3118 	for (int i = 0; i < 5; i++) {
3119 		nvme_ns_free_zns_specific_data(&ns[i]);
3120 	}
3121 
	/* case 4: nvme_ctrlr_identify_ns_iocs_specific_async returns 1, expect: failure */
3123 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3124 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3125 	prev_nsid = 1;
3126 	ctrlr.active_ns_count = 5;
3127 	ns[1].csi = SPDK_NVME_CSI_ZNS;
3128 	g_fail_next_identify = true;
3129 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3130 	CU_ASSERT(rc == 1);
3131 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
3132 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3133 
3134 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
3135 }
3136 
3137 static void
3138 test_nvme_ctrlr_set_supported_log_pages(void)
3139 {
3140 	int rc;
3141 	struct spdk_nvme_ctrlr ctrlr = {};
3142 
	/* ANA reporting supported */
3144 	memset(&ctrlr, 0, sizeof(ctrlr));
3145 	ctrlr.cdata.cmic.ana_reporting = true;
3146 	ctrlr.cdata.lpa.celp = 1;
3147 	ctrlr.cdata.nanagrpid = 1;
3148 	ctrlr.active_ns_count = 1;
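	/* The ANA log page buffer must hold the header, one group descriptor per
	 * ANA group (nanagrpid) and one 32-bit NSID per active namespace, which the
	 * size assertion below spells out.
	 */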
3149 
3150 	rc = nvme_ctrlr_set_supported_log_pages(&ctrlr);
3151 	CU_ASSERT(rc == 0);
3152 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ERROR] == true);
3153 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] == true);
3154 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] == true);
3155 	CU_ASSERT(ctrlr.ana_log_page_size == sizeof(struct spdk_nvme_ana_page) +
3156 		  sizeof(struct spdk_nvme_ana_group_descriptor) * 1 + sizeof(uint32_t) * 1);
3157 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS] == true);
3158 	free(ctrlr.ana_log_page);
3159 	free(ctrlr.copied_ana_desc);
3160 }
3161 
3162 static void
3163 test_nvme_ctrlr_set_intel_supported_log_pages(void)
3164 {
3165 	DECLARE_AND_CONSTRUCT_CTRLR();
3166 
3167 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
3168 
3169 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3170 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
3171 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
3172 	ctrlr.state = NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES;
3173 
3174 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
3175 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES);
3176 
3177 	set_status_code = SPDK_NVME_SC_SUCCESS;
3178 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
3179 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES);
3180 
3181 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ERROR] == true);
3182 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] == true);
3183 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] == true);
3184 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] == true);
3185 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] == true);
3186 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] == true);
3187 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_SMART] == true);
3188 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] == true);
3189 
3190 	nvme_ctrlr_destruct(&ctrlr);
3191 }
3192 
3193 #define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
3194 				 sizeof(uint32_t))
3195 static void
3196 test_nvme_ctrlr_parse_ana_log_page(void)
3197 {
3198 	int rc, i;
3199 	struct spdk_nvme_ctrlr ctrlr = {};
3200 	struct spdk_nvme_ns ns[3] = {};
3201 	struct spdk_nvme_ana_page ana_hdr;
3202 	char _ana_desc[UT_ANA_DESC_SIZE];
3203 	struct spdk_nvme_ana_group_descriptor *ana_desc;
3204 	uint32_t offset;
3205 
3206 	RB_INIT(&ctrlr.ns);
3207 	for (i = 0; i < 3; i++) {
3208 		ns[i].id = i + 1;
3209 		ns[i].active = true;
3210 		RB_INSERT(nvme_ns_tree, &ctrlr.ns, &ns[i]);
3211 	}
3212 
3213 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
3214 
3215 	ctrlr.cdata.nn = 3;
3216 	ctrlr.cdata.nanagrpid = 3;
3217 	ctrlr.active_ns_count = 3;
3218 
3219 	rc = nvme_ctrlr_update_ana_log_page(&ctrlr);
3220 	CU_ASSERT(rc == 0);
3221 	CU_ASSERT(ctrlr.ana_log_page != NULL);
3222 	CU_ASSERT(ctrlr.copied_ana_desc != NULL);
3223 
3224 	/*
3225 	 * Create ANA log page data - There are three ANA groups.
3226 	 * Each ANA group has a namespace and has a different ANA state.
3227 	 */
3228 	memset(&ana_hdr, 0, sizeof(ana_hdr));
3229 	ana_hdr.num_ana_group_desc = 3;
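	/* Group descriptors follow the header back to back; each one occupies
	 * sizeof(struct spdk_nvme_ana_group_descriptor) plus num_of_nsid 32-bit
	 * NSIDs, which UT_ANA_DESC_SIZE accounts for with a single NSID.
	 */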
3230 
3231 	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= ctrlr.ana_log_page_size);
3232 	memcpy((char *)ctrlr.ana_log_page, (char *)&ana_hdr, sizeof(ana_hdr));
3233 	offset = sizeof(ana_hdr);
3234 
3235 	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;
3236 	memset(ana_desc, 0, UT_ANA_DESC_SIZE);
3237 	ana_desc->num_of_nsid = 1;
3238 
3239 	ana_desc->ana_group_id = 1;
3240 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3241 	ana_desc->nsid[0] = 3;
3242 
3243 	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
3244 	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);
3245 	offset += UT_ANA_DESC_SIZE;
3246 
3247 	ana_desc->ana_group_id = 2;
3248 	ana_desc->ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3249 	ana_desc->nsid[0] = 2;
3250 
3251 	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
3252 	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);
3253 	offset += UT_ANA_DESC_SIZE;
3254 
3255 	ana_desc->ana_group_id = 3;
3256 	ana_desc->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3257 	ana_desc->nsid[0] = 1;
3258 
3259 	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
3260 	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);
3261 
3262 	/* Parse the created ANA log page data, and update ANA states. */
3263 	rc = nvme_ctrlr_parse_ana_log_page(&ctrlr, nvme_ctrlr_update_ns_ana_states,
3264 					   &ctrlr);
3265 	CU_ASSERT(rc == 0);
3266 	CU_ASSERT(ns[0].ana_group_id == 3);
3267 	CU_ASSERT(ns[0].ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3268 	CU_ASSERT(ns[1].ana_group_id == 2);
3269 	CU_ASSERT(ns[1].ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3270 	CU_ASSERT(ns[2].ana_group_id == 1);
3271 	CU_ASSERT(ns[2].ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3272 
3273 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
3274 
3275 	free(ctrlr.ana_log_page);
3276 	free(ctrlr.copied_ana_desc);
3277 }
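
/*
 * Sketch, with assumptions noted inline: the log page built above is a fixed
 * header followed by variable-length group descriptors, each trailed by
 * num_of_nsid 32-bit NSIDs. A minimal walk over such a buffer:
 */
#if 0
static void
ut_walk_ana_log(const struct spdk_nvme_ana_page *hdr, size_t size)
{
	const uint8_t *p = (const uint8_t *)hdr + sizeof(*hdr);
	uint32_t i, j;

	for (i = 0; i < hdr->num_ana_group_desc; i++) {
		const struct spdk_nvme_ana_group_descriptor *desc = (const void *)p;

		for (j = 0; j < desc->num_of_nsid; j++) {
			/* desc->nsid[j] belongs to group desc->ana_group_id,
			 * which is in state desc->ana_state.
			 */
			(void)desc->nsid[j];
		}
		p += sizeof(*desc) + desc->num_of_nsid * sizeof(uint32_t);
		assert((size_t)(p - (const uint8_t *)hdr) <= size);
	}
}
#endif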

static void
test_nvme_ctrlr_ana_resize(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();
	uint32_t active_ns_list[] = { 1, 2, 3, 4 };
	struct spdk_nvme_ana_page ana_hdr = {
		.change_count = 0,
		.num_ana_group_desc = 1
	};
	uint8_t ana_desc_buf[sizeof(struct spdk_nvme_ana_group_descriptor) + 4 * sizeof(uint32_t)] = {};
	struct spdk_nvme_ana_group_descriptor *ana_desc =
		(struct spdk_nvme_ana_group_descriptor *)ana_desc_buf;
	struct spdk_nvme_ns *ns;
	union spdk_nvme_async_event_completion aer_event = {
		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
	};
	struct spdk_nvme_cpl aer_cpl = {
		.status.sct = SPDK_NVME_SCT_GENERIC,
		.status.sc = SPDK_NVME_SC_SUCCESS,
		.cdw0 = aer_event.raw
	};
	uint32_t i;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);

	ctrlr.vs.bits.mjr = 1;
	ctrlr.vs.bits.mnr = 4;
	ctrlr.vs.bits.ter = 0;
	ctrlr.cdata.nn = 4096;
	ctrlr.cdata.cmic.ana_reporting = true;
	ctrlr.cdata.nanagrpid = 1;

	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
	/* Start with 2 active namespaces */
	g_active_ns_list = active_ns_list;
	g_active_ns_list_length = 2;
	g_ana_hdr = &ana_hdr;
	g_ana_descs = &ana_desc;
	ana_desc->ana_group_id = 1;
	ana_desc->ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ana_desc->num_of_nsid = 2;
	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ana_desc->nsid[i] = i + 1;
	}

	/* Bring controller to ready state */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	}

	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ns = spdk_nvme_ctrlr_get_ns(&ctrlr, i + 1);
		CU_ASSERT(ns->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	}

	/* Add more namespaces */
	g_active_ns_list_length = 4;
	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
	nvme_ctrlr_complete_queued_async_events(&ctrlr);

	/* Update ANA log with new namespaces */
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->num_of_nsid = 4;
	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ana_desc->nsid[i] = i + 1;
	}
	aer_event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
	aer_cpl.cdw0 = aer_event.raw;
	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
	nvme_ctrlr_complete_queued_async_events(&ctrlr);

	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ns = spdk_nvme_ctrlr_get_ns(&ctrlr, i + 1);
		CU_ASSERT(ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	}

	g_active_ns_list = NULL;
	g_active_ns_list_length = 0;
	g_ana_hdr = NULL;
	g_ana_descs = NULL;
	nvme_ctrlr_free_processes(&ctrlr);
	nvme_ctrlr_destruct(&ctrlr);
}
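
/*
 * Sketch: as exercised above, an async event's type and info are packed into
 * CDW0 of the AER completion. Building one for an arbitrary event could look
 * like this (ut_make_aer_cpl is hypothetical):
 */
#if 0
static struct spdk_nvme_cpl
ut_make_aer_cpl(uint8_t aet, uint8_t aei)
{
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};

	event.bits.async_event_type = aet;
	event.bits.async_event_info = aei;
	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	cpl.cdw0 = event.raw;
	return cpl;
}
#endif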

static void
test_nvme_ctrlr_get_memory_domains(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	/* The transport's return value is passed through to the caller unchanged. */
	MOCK_SET(nvme_transport_ctrlr_get_memory_domains, 1);
	CU_ASSERT(spdk_nvme_ctrlr_get_memory_domains(&ctrlr, NULL, 0) == 1);

	MOCK_SET(nvme_transport_ctrlr_get_memory_domains, 0);
	CU_ASSERT(spdk_nvme_ctrlr_get_memory_domains(&ctrlr, NULL, 0) == 0);

	MOCK_CLEAR(nvme_transport_ctrlr_get_memory_domains);
}
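
/*
 * Sketch of the return-value mock pattern the test relies on, assuming that
 * MOCK_SET() arms a per-function override consumed by the stub and
 * MOCK_CLEAR() restores the default. A hand-rolled equivalent:
 */
#if 0
static int ut_mock_rc;
static bool ut_mock_armed;

static int
ut_stubbed_transport_call(void)
{
	return ut_mock_armed ? ut_mock_rc : 0; /* default return value is 0 */
}
#endif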

static void
test_nvme_transport_ctrlr_ready(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	/* Transport init succeeded */
	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	/* Transport init failed */
	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	MOCK_SET(nvme_transport_ctrlr_ready, -1);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == -1);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
	MOCK_CLEAR(nvme_transport_ctrlr_ready);
}
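
/*
 * Sketch: the two cases above imply a simple transition out of
 * TRANSPORT_READY - success advances to READY, failure lands in ERROR.
 * A hedged rendering of that step, not the driver's literal code:
 */
#if 0
static int
ut_transport_ready_step(struct spdk_nvme_ctrlr *ctrlr)
{
	if (nvme_transport_ctrlr_ready(ctrlr) != 0) {
		ctrlr->state = NVME_CTRLR_STATE_ERROR;
		return -1;
	}
	ctrlr->state = NVME_CTRLR_STATE_READY;
	return 0;
}
#endif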

static void
test_nvme_ctrlr_disable(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();
	int rc;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	/* Start a Controller Level Reset. */
	ctrlr.is_disconnecting = true;
	nvme_ctrlr_disable(&ctrlr);

	g_ut_nvme_regs.cc.bits.en = 0;

	/* CC.EN is cleared but CSTS.RDY is still set, so polling must retry. */
	rc = nvme_ctrlr_disable_poll(&ctrlr);
	CU_ASSERT(rc == -EAGAIN);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	g_ut_nvme_regs.csts.bits.rdy = 0;

	/* Once CSTS.RDY drops to 0, the disable completes. */
	rc = nvme_ctrlr_disable_poll(&ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/* Report shutdown complete so that destruct does not have to poll. */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
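
/*
 * Sketch of the register handshake the test drives: a disable clears CC.EN
 * and then waits for the device to report CSTS.RDY == 0. A minimal poll
 * predicate under that assumption:
 */
#if 0
static int
ut_poll_disable(const volatile struct spdk_nvme_registers *regs)
{
	if (regs->cc.bits.en != 0 || regs->csts.bits.rdy != 0) {
		return -EAGAIN; /* not disabled yet, keep polling */
	}
	return 0; /* controller is fully disabled */
}
#endif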

static void
test_nvme_numa_id(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	/* A NUMA id that was never marked valid must not be reported. */
	ctrlr.numa.id = 3;
	ctrlr.numa.id_valid = 0;
	CU_ASSERT(spdk_nvme_ctrlr_get_numa_id(&ctrlr) == SPDK_ENV_NUMA_ID_ANY);

	ctrlr.numa.id_valid = 1;
	CU_ASSERT(spdk_nvme_ctrlr_get_numa_id(&ctrlr) == 3);

	ctrlr.numa.id = SPDK_ENV_NUMA_ID_ANY;
	CU_ASSERT(spdk_nvme_ctrlr_get_numa_id(&ctrlr) == SPDK_ENV_NUMA_ID_ANY);
}
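
/*
 * Sketch: the assertions above are consistent with an accessor that returns
 * the recorded NUMA node only when it was marked valid. A hypothetical
 * equivalent:
 */
#if 0
static int32_t
ut_numa_id(const struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->numa.id_valid ? ctrlr->numa.id : SPDK_ENV_NUMA_ID_ANY;
}
#endif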

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_ctrlr", NULL, NULL);

	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_1);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_1);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_rr);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_vs);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_delay);
	CU_ADD_TEST(suite, test_alloc_io_qpair_rr_1);
	CU_ADD_TEST(suite, test_ctrlr_get_default_ctrlr_opts);
	CU_ADD_TEST(suite, test_ctrlr_get_default_io_qpair_opts);
	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_1);
	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_2);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_update_firmware);
	CU_ADD_TEST(suite, test_nvme_ctrlr_fail);
	CU_ADD_TEST(suite, test_nvme_ctrlr_construct_intel_support_log_page_list);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_features);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_host_feature);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_doorbell_buffer_config);
#if 0 /* TODO: move to PCIe-specific unit test */
	CU_ADD_TEST(suite, test_nvme_ctrlr_alloc_cmb);
#endif
	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns);
	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns_error_case);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_reconnect_io_qpair);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_set_trid);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_nvmf_ioccsz);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_num_queues);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_keep_alive_timeout);
	CU_ADD_TEST(suite, test_alloc_io_qpair_fail);
	CU_ADD_TEST(suite, test_nvme_ctrlr_add_remove_process);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_arbitration_feature);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_state);
	CU_ADD_TEST(suite, test_nvme_ctrlr_active_ns_list_v0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_active_ns_list_v2);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ns_mgmt);
	CU_ADD_TEST(suite, test_nvme_ctrlr_reset);
	CU_ADD_TEST(suite, test_nvme_ctrlr_aer_callback);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ns_attr_changed);
	CU_ADD_TEST(suite, test_nvme_ctrlr_identify_namespaces_iocs_specific_next);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_log_pages);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_intel_supported_log_pages);
	CU_ADD_TEST(suite, test_nvme_ctrlr_parse_ana_log_page);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ana_resize);
	CU_ADD_TEST(suite, test_nvme_ctrlr_get_memory_domains);
	CU_ADD_TEST(suite, test_nvme_transport_ctrlr_ready);
	CU_ADD_TEST(suite, test_nvme_ctrlr_disable);
	CU_ADD_TEST(suite, test_nvme_numa_id);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}