/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation. All rights reserved.
 *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "spdk/log.h"

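/*
 * The code under test and its test environment are compiled directly into
 * this file, so that internal (static) functions and state can be exercised.
 */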
#include "common/lib/test_env.c"

#include "nvme/nvme_ctrlr.c"
#include "nvme/nvme_quirks.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

pid_t g_spdk_nvme_pid;

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;

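/*
 * Fake controller register file.  The transport register stubs below read and
 * write this structure instead of real MMIO, and a test may hook g_set_reg_cb
 * to observe or react to each register write.
 */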
struct spdk_nvme_registers g_ut_nvme_regs = {};
typedef void (*set_reg_cb)(void);
set_reg_cb g_set_reg_cb;

__thread int    nvme_thread_ioq_index = -1;

uint32_t set_size = 1;

int set_status_cpl = -1;

#define UT_HOSTID "e53e9258-c93b-48b5-be1a-f025af6d232a"

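/* No-op stubs for dependencies that are irrelevant to the cases in this file. */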
DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
	    (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_set_id_desc_list_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_free_iocs_specific_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_qpair_abort_all_queued_reqs, (struct spdk_nvme_qpair *qpair));
DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
		struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_io_msg_ctrlr_update, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB(nvme_io_msg_process, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(nvme_transport_ctrlr_reserve_cmb, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_receive, int, (struct spdk_nvme_ctrlr *ctrlr,
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_send, int, (struct spdk_nvme_ctrlr *ctrlr,
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_qpair_abort_queued_reqs, (struct spdk_nvme_qpair *qpair));

int
nvme_get_default_hostnqn(char *buf, int len)
{
	const char *nqn = "nqn.2014-08.org.nvmexpress:uuid:" UT_HOSTID;

	/* Copy the NUL terminator too, so callers always get a proper C string. */
	SPDK_CU_ASSERT_FATAL(len > (int)strlen(nqn));
	memcpy(buf, nqn, strlen(nqn) + 1);

	return 0;
}

DEFINE_RETURN_MOCK(nvme_transport_ctrlr_get_memory_domains, int);
int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	HANDLE_RETURN_MOCK(nvme_transport_ctrlr_get_memory_domains);

	return 0;
}

DEFINE_RETURN_MOCK(nvme_transport_ctrlr_ready, int);
int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	HANDLE_RETURN_MOCK(nvme_transport_ctrlr_ready);
	return 0;
}

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	return NULL;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct_finish(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	if (g_set_reg_cb) {
		g_set_reg_cb();
	}
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	if (g_set_reg_cb) {
		g_set_reg_cb();
	}
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_set_reg_4(ctrlr, offset, value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_set_reg_8(ctrlr, offset, value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};
	uint32_t value;

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_get_reg_4(ctrlr, offset, &value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};
	uint64_t value;

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_get_reg_8(ctrlr, offset, &value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return UINT32_MAX;
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	return 1;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	qpair->ctrlr = ctrlr;
	qpair->id = qid;
	qpair->qprio = opts->qprio;

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);
}

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
}

int
nvme_driver_init(void)
{
	return 0;
}

int
nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr,
		enum spdk_nvme_qprio qprio,
		uint32_t num_requests, bool async)
{
	qpair->id = id;
	qpair->qprio = qprio;
	qpair->ctrlr = ctrlr;
	qpair->async = async;

	return 0;
}

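/*
 * Admin command fakes: most complete synchronously by invoking the command
 * callback with fake_cpl, whose status code is controlled by set_status_code.
 */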
static struct spdk_nvme_cpl fake_cpl = {};
static enum spdk_nvme_generic_command_status_code set_status_code = SPDK_NVME_SC_SUCCESS;

static void
fake_cpl_sc(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl.status.sc = set_status_code;
	cb_fn(cb_arg, &fake_cpl);
}

static uint32_t g_ut_cdw11;

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	g_ut_cdw11 = cdw11;
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

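/*
 * Get-log-page fake: when g_ana_hdr is set, an ANA log page is assembled from
 * the test-provided descriptors; the Intel log page directory is reported with
 * every optional log page present.
 */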
struct spdk_nvme_ana_page *g_ana_hdr;
struct spdk_nvme_ana_group_descriptor **g_ana_descs;

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) && g_ana_hdr) {
		uint32_t i;
		uint8_t *ptr = payload;

		memset(payload, 0, payload_size);
		memcpy(ptr, g_ana_hdr, sizeof(*g_ana_hdr));
		ptr += sizeof(*g_ana_hdr);
		for (i = 0; i < g_ana_hdr->num_ana_group_desc; ++i) {
			uint32_t desc_size = sizeof(**g_ana_descs) +
					     g_ana_descs[i]->num_of_nsid * sizeof(uint32_t);
			memcpy(ptr, g_ana_descs[i], desc_size);
			ptr += desc_size;
		}
	} else if (log_page == SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY) {
		struct spdk_nvme_intel_log_page_directory *log_page_directory = payload;

		log_page_directory->read_latency_log_len = true;
		log_page_directory->write_latency_log_len = true;
		log_page_directory->temperature_statistics_log_len = true;
		log_page_directory->smart_log_len = true;
		log_page_directory->marketing_description_log_len = true;
	}

	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10, uint32_t cdw11,
				     uint32_t cdw14, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);
	STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);

	/*
	 * For the purposes of this unit test, we don't need to bother emulating
	 * request submission.
	 */

	return 0;
}

static int32_t g_wait_for_completion_return_val;

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return g_wait_for_completion_return_val;
}

void
nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/* This should not happen in the test env, since this callback is always
	 * invoked before wait_for_completion_*, and only the wait_for_completion_*
	 * functions can set this field to true. */
	CU_ASSERT(status->timed_out == false);

	status->cpl = *cpl;
	status->done = true;
}

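/*
 * The wait_for_completion fakes below complete immediately.  Setting
 * set_status_cpl to 1 forces an error completion, and a negative return from
 * spdk_nvme_qpair_process_completions() simulates a failed wait, which is
 * recorded in g_failed_status.
 */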
static struct nvme_completion_poll_status *g_failed_status;

int
nvme_wait_for_completion_robust_lock_timeout(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex,
	uint64_t timeout_in_usecs)
{
	if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
		g_failed_status = status;
		status->timed_out = true;
		return -1;
	}

	status->done = true;
	if (set_status_cpl == 1) {
		status->cpl.status.sc = 1;
	}
	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
}

int
nvme_wait_for_completion_robust_lock(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, robust_mutex, 0);
}

int
nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			 struct nvme_completion_poll_status *status)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, 0);
}

int
nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				 struct nvme_completion_poll_status *status,
				 uint64_t timeout_in_usecs)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, timeout_in_usecs);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

static uint32_t *g_active_ns_list = NULL;
static uint32_t g_active_ns_list_length = 0;
static struct spdk_nvme_ctrlr_data *g_cdata = NULL;
static bool g_fail_next_identify = false;

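/*
 * Identify fake: emulates the active namespace list (either every NSID up to
 * cdata.nn, or the test-provided g_active_ns_list), honoring the CNS 02h
 * paging rule that only NSIDs greater than the one in the command are
 * returned.  g_fail_next_identify makes exactly one call fail.
 */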
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (g_fail_next_identify) {
		g_fail_next_identify = false;
		return 1;
	}

	memset(payload, 0, payload_size);
	if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
		uint32_t count = 0;
		uint32_t i = 0;
		struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;

		if (g_active_ns_list == NULL) {
			for (i = 1; i <= ctrlr->cdata.nn; i++) {
				if (i <= nsid) {
					continue;
				}

				ns_list->ns_list[count++] = i;
				if (count == SPDK_COUNTOF(ns_list->ns_list)) {
					break;
				}
			}
		} else {
			for (i = 0; i < g_active_ns_list_length; i++) {
				uint32_t cur_nsid = g_active_ns_list[i];
				if (cur_nsid <= nsid) {
					continue;
				}

				ns_list->ns_list[count++] = cur_nsid;
				if (count == SPDK_COUNTOF(ns_list->ns_list)) {
					break;
				}
			}
		}
	} else if (cns == SPDK_NVME_IDENTIFY_CTRLR) {
		if (g_cdata) {
			memcpy(payload, g_cdata, sizeof(*g_cdata));
		}
	} else if (cns == SPDK_NVME_IDENTIFY_NS_IOCS) {
		return 0;
	}

	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				   uint32_t doper, uint32_t dtype, uint32_t dspec,
				   void *payload, uint32_t payload_size, uint32_t cdw12,
				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				      uint32_t doper, uint32_t dtype, uint32_t dspec,
				      void *payload, uint32_t payload_size, uint32_t cdw12,
				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

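/*
 * Firmware fakes: commits to slot 0 and mismatched size/payload pairs are
 * rejected; a successful commit arms set_status_cpl so the following
 * completion wait reports an error, unless the controller is resetting.
 */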
int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
	if (fw_commit->fs == 0) {
		return -1;
	}
	set_status_cpl = 1;
	if (ctrlr->is_resetting == true) {
		set_status_cpl = 0;
	}
	return 0;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
		return -1;
	}
	CU_ASSERT(offset == 0);
	return 0;
}

bool
nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns)
{
	switch (ns->csi) {
	case SPDK_NVME_CSI_NVM:
		/*
		 * NVM Command Set Specific Identify Namespace data structure
		 * is currently all-zeroes, reserved for future use.
		 */
		return false;
	case SPDK_NVME_CSI_ZNS:
		return true;
	default:
		SPDK_WARNLOG("Unsupported CSI: %u for NSID: %u\n", ns->csi, ns->id);
		return false;
	}
}

void
nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns)
{
	if (!ns->id) {
		return;
	}

	if (ns->nsdata_zns) {
		spdk_free(ns->nsdata_zns);
		ns->nsdata_zns = NULL;
	}
}

void
nvme_ns_free_nvm_specific_data(struct spdk_nvme_ns *ns)
{
	if (!ns->id) {
		return;
	}

	if (ns->nsdata_nvm) {
		spdk_free(ns->nsdata_nvm);
		ns->nsdata_nvm = NULL;
	}
}

void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

void
spdk_pci_device_detach(struct spdk_pci_device *device)
{
}

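/*
 * Declares a controller whose fake admin queue holds a single free request,
 * on a custom transport, as most of the init state machine tests require.
 * Usage (see the tests below):
 *
 *     DECLARE_AND_CONSTRUCT_CTRLR();
 *     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 */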
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;					\
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;

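/*
 * The test_nvme_ctrlr_init_* cases walk nvme_ctrlr_process_init() through the
 * CC.EN / CSTS.RDY handshake defined by the NVMe spec: a controller that is
 * found enabled must first be disabled (set CC.EN = 0, wait for CSTS.RDY = 0)
 * before it is re-enabled and brought to READY.
 */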
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_EN_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_SET_EN_0) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

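/*
 * The *_ams_* cases check that CC.AMS is only programmed with an arbitration
 * mechanism advertised in CAP.AMS; selecting an unsupported or invalid
 * mechanism must fail enablement and leave CC.EN clear.
 */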
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Default round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = 0x0;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to default round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Weighted round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to weighted round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Vendor specific arbitration mechanism enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to vendor specific arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

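/*
 * With CC.EN = 0 and CSTS.RDY = 0 the disable phase completes immediately,
 * and the state machine proceeds directly to enabling the controller.
 */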
static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 1
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

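/*
 * setup_qpairs() puts the controller directly into the READY state with
 * num_io_queues free qpair IDs (ID 0 is reserved for the admin queue),
 * bypassing the init state machine.
 */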
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
	uint32_t i;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);

	ctrlr->page_size = 0x1000;
	ctrlr->opts.num_io_queues = num_io_queues;
	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	ctrlr->state = NVME_CTRLR_STATE_READY;
	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);

	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
}

static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct(ctrlr);
}

static void
test_alloc_io_qpair_rr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0;

	setup_qpairs(&ctrlr, 1);

	/*
	 * Fake to simulate the controller with default round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	/* Only 1 I/O qpair was allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpair has been returned to the free list,
	 * we should be able to allocate it again.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/* Only qprio 0 is acceptable for the default round robin arbitration mechanism */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 3;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
	opts.qprio = 0;

	/* An I/O qpair can only be created when the ctrlr is in the READY state */
	ctrlr.state = NVME_CTRLR_STATE_ENABLE;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);
	ctrlr.state = NVME_CTRLR_STATE_READY;

	cleanup_qpairs(&ctrlr);
}

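/*
 * With weighted round robin arbitration, qprio values 0-3 select the NVMe
 * urgent/high/medium/low priority classes, so all four must be accepted.
 */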
1641 static void
1642 test_alloc_io_qpair_wrr_1(void)
1643 {
1644 	struct spdk_nvme_io_qpair_opts opts;
1645 	struct spdk_nvme_ctrlr ctrlr = {};
1646 	struct spdk_nvme_qpair *q0, *q1;
1647 
1648 	setup_qpairs(&ctrlr, 2);
1649 
1650 	/*
1651 	 * Fake to simulate the controller with weighted round robin
1652 	 * arbitration mechanism.
1653 	 */
1654 	ctrlr.process_init_cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
1655 
1656 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
1657 
1658 	/*
1659 	 * Allocate 2 qpairs and free them
1660 	 */
1661 	opts.qprio = 0;
1662 	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1663 	SPDK_CU_ASSERT_FATAL(q0 != NULL);
1664 	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
1665 
1666 	opts.qprio = 1;
1667 	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1668 	SPDK_CU_ASSERT_FATAL(q1 != NULL);
1669 	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
1670 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
1671 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
1672 
1673 	/*
1674 	 * Allocate 2 qpairs and free them in the reverse order
1675 	 */
1676 	opts.qprio = 2;
1677 	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1678 	SPDK_CU_ASSERT_FATAL(q0 != NULL);
1679 	SPDK_CU_ASSERT_FATAL(q0->qprio == 2);
1680 
1681 	opts.qprio = 3;
1682 	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1683 	SPDK_CU_ASSERT_FATAL(q1 != NULL);
1684 	SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
1685 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
1686 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
1687 
1688 	/* Only 0 ~ 3 qprio is acceptable */
1689 	opts.qprio = 4;
1690 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
1691 
1692 	cleanup_qpairs(&ctrlr);
1693 }
1694 
1695 static void
1696 test_alloc_io_qpair_wrr_2(void)
1697 {
1698 	struct spdk_nvme_io_qpair_opts opts;
1699 	struct spdk_nvme_ctrlr ctrlr = {};
1700 	struct spdk_nvme_qpair *q0, *q1, *q2, *q3;
1701 
1702 	setup_qpairs(&ctrlr, 4);
1703 
1704 	/*
1705 	 * Fake to simulate the controller with weighted round robin
1706 	 * arbitration mechanism.
1707 	 */
1708 	ctrlr.process_init_cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
1709 
1710 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
1711 
1712 	opts.qprio = 0;
1713 	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1714 	SPDK_CU_ASSERT_FATAL(q0 != NULL);
1715 	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
1716 
1717 	opts.qprio = 1;
1718 	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1719 	SPDK_CU_ASSERT_FATAL(q1 != NULL);
1720 	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
1721 
1722 	opts.qprio = 2;
1723 	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1724 	SPDK_CU_ASSERT_FATAL(q2 != NULL);
1725 	SPDK_CU_ASSERT_FATAL(q2->qprio == 2);
1726 
1727 	opts.qprio = 3;
1728 	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1729 	SPDK_CU_ASSERT_FATAL(q3 != NULL);
1730 	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
1731 
1732 	/* Only 4 I/O qpairs was allocated, so this should fail */
1733 	opts.qprio = 0;
1734 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
1735 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
1736 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
1737 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
1738 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
1739 
1740 	/*
1741 	 * Now that the qpair has been returned to the free list,
1742 	 * we should be able to allocate it again.
1743 	 *
1744 	 * Allocate 4 I/O qpairs and half of them with same qprio.
1745 	 */
1746 	opts.qprio = 1;
1747 	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1748 	SPDK_CU_ASSERT_FATAL(q0 != NULL);
1749 	SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
1750 
1751 	opts.qprio = 1;
1752 	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1753 	SPDK_CU_ASSERT_FATAL(q1 != NULL);
1754 	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
1755 
1756 	opts.qprio = 3;
1757 	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1758 	SPDK_CU_ASSERT_FATAL(q2 != NULL);
1759 	SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
1760 
1761 	opts.qprio = 3;
1762 	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
1763 	SPDK_CU_ASSERT_FATAL(q3 != NULL);
1764 	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
1765 
1766 	/*
1767 	 * Free all I/O qpairs in reverse order
1768 	 */
1769 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
1770 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
1771 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
1772 	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
1773 
1774 	cleanup_qpairs(&ctrlr);
1775 }
1776 
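/* Transport connect stub: record the call, mark the qpair connected, and
 * return an injectable status code. */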
1777 bool g_connect_qpair_called = false;
1778 int g_connect_qpair_return_code = 0;
1779 int
1780 nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1781 {
1782 	g_connect_qpair_called = true;
1783 	qpair->state = NVME_QPAIR_CONNECTED;
1784 	return g_connect_qpair_return_code;
1785 }
1786 
1787 static void
1788 test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
1789 {
1790 	struct spdk_nvme_ctrlr	ctrlr = {};
1791 	struct spdk_nvme_qpair	qpair = {};
1792 	int rc;
1793 
1794 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
1795 
1796 	/* Various states of controller disconnect. */
1797 	qpair.id = 1;
1798 	qpair.ctrlr = &ctrlr;
1799 	ctrlr.is_removed = 1;
1800 	ctrlr.is_failed = 0;
1801 	ctrlr.is_resetting = 0;
1802 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1803 	CU_ASSERT(rc == -ENODEV);
1804 
1805 	ctrlr.is_removed = 0;
1806 	ctrlr.is_failed = 1;
1807 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1808 	CU_ASSERT(rc == -ENXIO);
1809 
1810 	ctrlr.is_failed = 0;
1811 	ctrlr.is_resetting = 1;
1812 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1813 	CU_ASSERT(rc == -EAGAIN);
1814 
1815 	/* Confirm precedence for controller states: removed > resetting > failed */
1816 	ctrlr.is_removed = 1;
1817 	ctrlr.is_failed = 1;
1818 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1819 	CU_ASSERT(rc == -ENODEV);
1820 
1821 	ctrlr.is_removed = 0;
1822 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1823 	CU_ASSERT(rc == -EAGAIN);
1824 
1825 	ctrlr.is_resetting = 0;
1826 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1827 	CU_ASSERT(rc == -ENXIO);
1828 
1829 	/* Qpair is already connected; make sure we don't call down to the transport */
1830 	ctrlr.is_failed = 0;
1831 	qpair.state = NVME_QPAIR_CONNECTED;
1832 	g_connect_qpair_called = false;
1833 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1834 	CU_ASSERT(g_connect_qpair_called == false);
1835 	CU_ASSERT(rc == 0);
1836 
1837 	/* Transport qpair is disconnected; make sure we call down to the transport */
1838 	qpair.state = NVME_QPAIR_DISCONNECTED;
1839 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1840 	CU_ASSERT(g_connect_qpair_called == true);
1841 	CU_ASSERT(rc == 0);
1842 
1843 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
1844 }
1845 
1846 static void
1847 test_nvme_ctrlr_fail(void)
1848 {
1849 	struct spdk_nvme_ctrlr	ctrlr = {};
1850 
1851 	ctrlr.opts.num_io_queues = 0;
1852 	nvme_ctrlr_fail(&ctrlr, false);
1853 
1854 	CU_ASSERT(ctrlr.is_failed == true);
1855 }
1856 
1857 static void
1858 test_nvme_ctrlr_construct_intel_support_log_page_list(void)
1859 {
1860 	bool	res;
1861 	struct spdk_nvme_ctrlr				ctrlr = {};
1862 	struct spdk_nvme_intel_log_page_directory	payload = {};
1863 	struct spdk_pci_id				pci_id = {};
1864 
1865 	/* Get quirks for a device with all 0 vendor/device id */
1866 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1867 	CU_ASSERT(ctrlr.quirks == 0);
1868 
1869 	/* Set the vendor to Intel, but provide no device id */
1870 	pci_id.class_id = SPDK_PCI_CLASS_NVME;
1871 	ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
1872 	payload.temperature_statistics_log_len = 1;
1873 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1874 	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
1875 
1876 	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
1877 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
1878 	CU_ASSERT(res == true);
1879 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
1880 	CU_ASSERT(res == true);
1881 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
1882 	CU_ASSERT(res == false);
1883 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
1884 	CU_ASSERT(res == false);
1885 
1886 	/* Set a valid vendor id, device id, and subdevice id */
1887 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
1888 	payload.temperature_statistics_log_len = 0;
1889 	pci_id.vendor_id = SPDK_PCI_VID_INTEL;
1890 	pci_id.device_id = 0x0953;
1891 	pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
1892 	pci_id.subdevice_id = 0x3702;
1893 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1894 	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
1895 
1896 	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
1897 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
1898 	CU_ASSERT(res == true);
1899 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
1900 	CU_ASSERT(res == false);
1901 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
1902 	CU_ASSERT(res == true);
1903 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
1904 	CU_ASSERT(res == false);
1905 }
1906 
1907 static void
1908 test_nvme_ctrlr_set_supported_features(void)
1909 {
1910 	bool	res;
1911 	struct spdk_nvme_ctrlr			ctrlr = {};
1912 
1913 	/* Set an invalid vendor id */
1914 	ctrlr.cdata.vid = 0xFFFF;
1915 	nvme_ctrlr_set_supported_features(&ctrlr);
1916 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
1917 	CU_ASSERT(res == true);
1918 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
1919 	CU_ASSERT(res == false);
1920 
1921 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
1922 	nvme_ctrlr_set_supported_features(&ctrlr);
1923 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
1924 	CU_ASSERT(res == true);
1925 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
1926 	CU_ASSERT(res == true);
1927 }
1928 
1929 static void
1930 test_nvme_ctrlr_set_host_feature(void)
1931 {
1932 	DECLARE_AND_CONSTRUCT_CTRLR();
1933 
1934 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
1935 
1936 	ctrlr.cdata.ctratt.bits.elbas = 0;
1937 	ctrlr.state = NVME_CTRLR_STATE_SET_HOST_FEATURE;
1938 
1939 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1940 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_DB_BUF_CFG);
1941 
1942 	ctrlr.cdata.ctratt.bits.elbas = 1;
1943 	ctrlr.state = NVME_CTRLR_STATE_SET_HOST_FEATURE;
1944 
1945 	while (ctrlr.state != NVME_CTRLR_STATE_SET_DB_BUF_CFG) {
1946 		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1947 	}
1948 
1949 	CU_ASSERT(ctrlr.tmp_ptr == NULL);
1950 	CU_ASSERT(ctrlr.feature_supported[SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT] == true);
1951 
1952 	nvme_ctrlr_destruct(&ctrlr);
1953 }
1954 
1955 static void
1956 test_ctrlr_get_default_ctrlr_opts(void)
1957 {
1958 	struct spdk_nvme_ctrlr_opts opts = {};
1959 
1960 	CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id, UT_HOSTID) == 0);
1961 
1962 	memset(&opts, 0, sizeof(opts));
1963 
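	/* Only fields that fit within the caller-provided opts_size are written;
	 * this keeps compatibility with callers built against older structs. */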
1964 	/* set a smaller opts_size */
1965 	CU_ASSERT(sizeof(opts) > 8);
1966 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
1967 	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
1968 	CU_ASSERT_FALSE(opts.use_cmb_sqs);
1969 	/* Fields beyond the truncated opts_size must not be set to defaults */
1970 	CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
1971 	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
1972 	CU_ASSERT_EQUAL(opts.io_queue_size, 0);
1973 	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
1974 	for (int i = 0; i < 8; i++) {
1975 		CU_ASSERT(opts.host_id[i] == 0);
1976 	}
1977 	for (int i = 0; i < 16; i++) {
1978 		CU_ASSERT(opts.extended_host_id[i] == 0);
1979 	}
1980 	CU_ASSERT(strlen(opts.hostnqn) == 0);
1981 	CU_ASSERT(strlen(opts.src_addr) == 0);
1982 	CU_ASSERT(strlen(opts.src_svcid) == 0);
1983 	CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);
1984 
1985 	/* Pass the full opts_size */
1986 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
1987 	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
1988 	CU_ASSERT_FALSE(opts.use_cmb_sqs);
1989 	CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
1990 	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
1991 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
1992 	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
1993 	for (int i = 0; i < 8; i++) {
1994 		CU_ASSERT(opts.host_id[i] == 0);
1995 	}
1996 	CU_ASSERT_STRING_EQUAL(opts.hostnqn,
1997 			       "nqn.2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
1998 	CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
1999 			 sizeof(opts.extended_host_id)) == 0);
2000 	CU_ASSERT(strlen(opts.src_addr) == 0);
2001 	CU_ASSERT(strlen(opts.src_svcid) == 0);
2002 	CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
2003 }
2004 
2005 static void
2006 test_ctrlr_get_default_io_qpair_opts(void)
2007 {
2008 	struct spdk_nvme_ctrlr ctrlr = {};
2009 	struct spdk_nvme_io_qpair_opts opts = {};
2010 
2011 	memset(&opts, 0, sizeof(opts));
2012 
2013 	/* set a smaller opts_size */
2014 	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
2015 	CU_ASSERT(sizeof(opts) > 8);
2016 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
2017 	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
2018 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
2019 	/* The field beyond the truncated opts_size must not be set to a default */
2020 	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
2021 
2022 	/* Pass the full opts_size */
2023 	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
2024 	ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
2025 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
2026 	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
2027 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
2028 	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
2029 }
2030 
2031 #if 0 /* TODO: move to PCIe-specific unit test */
2032 static void
2033 test_nvme_ctrlr_alloc_cmb(void)
2034 {
2035 	int			rc;
2036 	uint64_t		offset;
2037 	struct spdk_nvme_ctrlr	ctrlr = {};
2038 
2039 	ctrlr.cmb_size = 0x1000000;
2040 	ctrlr.cmb_current_offset = 0x100;
2041 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
2042 	CU_ASSERT(rc == 0);
2043 	CU_ASSERT(offset == 0x1000);
2044 	CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);
2045 
2046 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
2047 	CU_ASSERT(rc == 0);
2048 	CU_ASSERT(offset == 0x2000);
2049 	CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);
2050 
2051 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
2052 	CU_ASSERT(rc == 0);
2053 	CU_ASSERT(offset == 0x100000);
2054 	CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);
2055 
2056 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
2057 	CU_ASSERT(rc == -1);
2058 }
2059 #endif
2060 
2061 static void
2062 test_spdk_nvme_ctrlr_update_firmware(void)
2063 {
2064 	struct spdk_nvme_ctrlr ctrlr = {};
2065 	void *payload = NULL;
2066 	int point_payload = 1;
2067 	int slot = 0;
2068 	int ret = 0;
2069 	struct spdk_nvme_status status;
2070 	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;
2071 
2072 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
2073 
2074 	/* An invalid size (not a multiple of 4 bytes) fails the size check */
2075 	set_size = 5;
2076 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2077 	CU_ASSERT(ret == -1);
2078 
2079 	/* When payload is NULL but set_size < min_page_size */
2080 	set_size = 4;
2081 	ctrlr.min_page_size = 5;
2082 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2083 	CU_ASSERT(ret == -1);
2084 
2085 	/* Payload is not NULL but min_page_size is 0 */
2086 	set_size = 4;
2087 	ctrlr.min_page_size = 0;
2088 	payload = &point_payload;
2089 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2090 	CU_ASSERT(ret == -1);
2091 
2092 	/* Firmware image download with valid payload and min_page_size; the completion reports failure (set_status_cpl = 1) */
2093 	set_status_cpl = 1;
2094 	set_size = 4;
2095 	ctrlr.min_page_size = 5;
2096 	payload = &point_payload;
2097 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2098 	CU_ASSERT(ret == -ENXIO);
2099 
2100 	/* Firmware image download completes (set_status_cpl = 0), but commit fails with slot 0 */
2101 	set_status_cpl = 0;
2102 	set_size = 4;
2103 	ctrlr.min_page_size = 5;
2104 	payload = &point_payload;
2105 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2106 	CU_ASSERT(ret == -1);
2107 
2108 	/* Check firmware commit */
2109 	ctrlr.is_resetting = false;
2110 	set_status_cpl = 0;
2111 	slot = 1;
2112 	set_size = 4;
2113 	ctrlr.min_page_size = 5;
2114 	payload = &point_payload;
2115 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2116 	CU_ASSERT(ret == -ENXIO);
2117 
2118 	/* Success path: size check, firmware download, and firmware commit */
2119 	ctrlr.is_resetting = true;
2120 	set_status_cpl = 0;
2121 	slot = 1;
2122 	set_size = 4;
2123 	ctrlr.min_page_size = 5;
2124 	payload = &point_payload;
2125 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2126 	CU_ASSERT(ret == 0);
2127 
2128 	/* nvme_wait_for_completion returns an error */
2129 	g_wait_for_completion_return_val = -1;
2130 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2131 	CU_ASSERT(ret == -ENXIO);
2132 	CU_ASSERT(g_failed_status != NULL);
2133 	CU_ASSERT(g_failed_status->timed_out == true);
2134 	/* The status should be freed by the callback, which is not triggered in the
2135 	   test env. Store the status in a global variable and free it manually.
2136 	   If spdk_nvme_ctrlr_update_firmware changes its behavior and frees the status
2137 	   itself, this will become a double free. */
2138 	free(g_failed_status);
2139 	g_failed_status = NULL;
2140 	g_wait_for_completion_return_val = 0;
2141 
2142 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
2143 	set_status_cpl = 0;
2144 }
2145 
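/* Stub for the Doorbell Buffer Config admin command: complete it immediately
 * with success so nvme_ctrlr_set_doorbell_buffer_config() can finish
 * synchronously in the test. */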
2146 int
2147 nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
2148 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
2149 {
2150 	fake_cpl_sc(cb_fn, cb_arg);
2151 	return 0;
2152 }
2153 
2154 static void
2155 test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
2156 {
2157 	struct spdk_nvme_ctrlr ctrlr = {};
2158 	int ret = -1;
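
	/* The test enables controller support (OACS doorbell_buffer_config) and
	 * uses the PCIe transport, where shadow doorbells apply. */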
2159 
2160 	ctrlr.cdata.oacs.doorbell_buffer_config = 1;
2161 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2162 	ctrlr.page_size = 0x1000;
2163 	MOCK_CLEAR(spdk_malloc);
2164 	MOCK_CLEAR(spdk_zmalloc);
2165 	ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
2166 	CU_ASSERT(ret == 0);
2167 	nvme_ctrlr_free_doorbell_buffer(&ctrlr);
2168 }
2169 
2170 static void
2171 test_nvme_ctrlr_test_active_ns(void)
2172 {
2173 	uint32_t		nsid, minor;
2174 	size_t			ns_id_count;
2175 	struct spdk_nvme_ctrlr	ctrlr = {};
2176 	uint32_t		active_ns_list[1531];
2177 
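	/* 1531 NSIDs do not fit in one 4 KiB Identify Active NS List page
	 * (1024 four-byte NSIDs per page), so multi-page retrieval is exercised. */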
2178 	for (nsid = 1; nsid <= 1531; nsid++) {
2179 		active_ns_list[nsid - 1] = nsid;
2180 	}
2181 
2182 	g_active_ns_list = active_ns_list;
2183 
2184 	ctrlr.page_size = 0x1000;
2185 
2186 	for (minor = 0; minor <= 2; minor++) {
2187 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2188 		ctrlr.state = NVME_CTRLR_STATE_READY;
2189 
2190 		ctrlr.vs.bits.mjr = 1;
2191 		ctrlr.vs.bits.mnr = minor;
2192 		ctrlr.vs.bits.ter = 0;
2193 		ctrlr.cdata.nn = 1531;
2194 
2195 		RB_INIT(&ctrlr.ns);
2196 
2197 		g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2198 		nvme_ctrlr_identify_active_ns(&ctrlr);
2199 
2200 		for (nsid = 1; nsid <= ctrlr.cdata.nn; nsid++) {
2201 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
2202 		}
2203 
2204 		for (; nsid <= 1559; nsid++) {
2205 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
2206 		}
2207 
2208 		g_active_ns_list_length = 0;
2209 		if (minor <= 1) {
2210 			ctrlr.cdata.nn = 0;
2211 		}
2212 		nvme_ctrlr_identify_active_ns(&ctrlr);
2213 		CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
2214 
2215 		g_active_ns_list_length = 1;
2216 		if (minor <= 1) {
2217 			ctrlr.cdata.nn = 1;
2218 		}
2219 		nvme_ctrlr_identify_active_ns(&ctrlr);
2220 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
2221 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
2222 		nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
2223 		CU_ASSERT(nsid == 1);
2224 
2225 		if (minor >= 2) {
2226 			/* For NVMe 1.2 and newer, the namespace list can have "holes" where
2227 			 * some namespaces are not active. Test this. */
2228 			g_active_ns_list_length = 2;
2229 			g_active_ns_list[1] = 3;
2230 			nvme_ctrlr_identify_active_ns(&ctrlr);
2231 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
2232 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
2233 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
2234 			nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
2235 			CU_ASSERT(nsid == 3);
2236 			nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
2237 			CU_ASSERT(nsid == 0);
2238 
2239 			/* Reset the active namespace list array */
2240 			g_active_ns_list[1] = 2;
2241 		}
2242 
2243 		g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2244 		if (minor <= 1) {
2245 			ctrlr.cdata.nn = 1531;
2246 		}
2247 		nvme_ctrlr_identify_active_ns(&ctrlr);
2248 
2249 		ns_id_count = 0;
2250 		for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
2251 		     nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
2252 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
2253 			ns_id_count++;
2254 		}
2255 		CU_ASSERT(ns_id_count == ctrlr.cdata.nn);
2256 
2257 		nvme_ctrlr_destruct(&ctrlr);
2258 	}
2259 
2260 	g_active_ns_list = NULL;
2261 	g_active_ns_list_length = 0;
2262 }
2263 
2264 static void
2265 test_nvme_ctrlr_test_active_ns_error_case(void)
2266 {
2267 	int rc;
2268 	struct spdk_nvme_ctrlr	ctrlr = {.state = NVME_CTRLR_STATE_READY};
2269 
2270 	ctrlr.page_size = 0x1000;
2271 	ctrlr.vs.bits.mjr = 1;
2272 	ctrlr.vs.bits.mnr = 2;
2273 	ctrlr.vs.bits.ter = 0;
2274 	ctrlr.cdata.nn = 2;
2275 
2276 	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
2277 	rc = nvme_ctrlr_identify_active_ns(&ctrlr);
2278 	CU_ASSERT(rc == -ENXIO);
2279 	set_status_code = SPDK_NVME_SC_SUCCESS;
2280 }
2281 
2282 static void
2283 test_nvme_ctrlr_init_delay(void)
2284 {
2285 	DECLARE_AND_CONSTRUCT_CTRLR();
2286 
2287 	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
2288 
2289 	/*
2290 	 * Initial state: CC.EN = 0, CSTS.RDY = 0
2291 	 * init() should set CC.EN = 1.
2292 	 */
2293 	g_ut_nvme_regs.cc.bits.en = 0;
2294 	g_ut_nvme_regs.csts.bits.rdy = 0;
2295 
2296 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2297 	/* Test that the initialization delay works correctly.  We only
2298 	 * do the initialization delay on SSDs that require it, so
2299 	 * set that quirk here.
2300 	 */
2301 	ctrlr.quirks = NVME_QUIRK_DELAY_BEFORE_INIT;
2302 	ctrlr.cdata.nn = 1;
2303 	ctrlr.page_size = 0x1000;
2304 	ctrlr.state = NVME_CTRLR_STATE_INIT_DELAY;
2305 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2306 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
2307 	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
2308 
2309 	/* Advance 1s; init just returns because the sleep time hasn't elapsed */
2310 	spdk_delay_us(1 * spdk_get_ticks_hz());
2311 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2312 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
2313 	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
2314 
2315 	/* Advance past the sleep timeout; initialization proceeds */
2316 	spdk_delay_us(2 * spdk_get_ticks_hz());
2317 	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
2318 		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2319 	}
2320 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2321 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
2322 
2323 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2324 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
2325 
2326 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2327 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
2328 
2329 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2330 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
2331 	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
2332 
2333 	/*
2334 	 * Transition to CSTS.RDY = 1.
2335 	 */
2336 	g_ut_nvme_regs.csts.bits.rdy = 1;
2337 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2338 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
2339 
2340 	/*
2341 	 * Transition to READY.
2342 	 */
2343 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2344 		nvme_ctrlr_process_init(&ctrlr);
2345 	}
2346 
2347 	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
2348 	nvme_ctrlr_destruct(&ctrlr);
2349 }
2350 
2351 static void
2352 test_spdk_nvme_ctrlr_set_trid(void)
2353 {
2354 	struct spdk_nvme_ctrlr ctrlr = {{0}};
2355 	struct spdk_nvme_transport_id new_trid = {{0}};
2356 
2357 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
2358 
2359 	ctrlr.is_failed = false;
2360 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2361 	snprintf(ctrlr.trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
2362 	snprintf(ctrlr.trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
2363 	snprintf(ctrlr.trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
2364 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EPERM);
2365 
2366 	ctrlr.is_failed = true;
2367 	new_trid.trtype = SPDK_NVME_TRANSPORT_TCP;
2368 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
2369 	CU_ASSERT(ctrlr.trid.trtype == SPDK_NVME_TRANSPORT_RDMA);
2370 
2371 	new_trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2372 	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode2");
2373 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
2374 	CU_ASSERT(strncmp(ctrlr.trid.subnqn, "nqn.2016-06.io.spdk:cnode1", SPDK_NVMF_NQN_MAX_LEN) == 0);
2375 
2377 	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
2378 	snprintf(new_trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
2379 	snprintf(new_trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4421");
2380 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == 0);
2381 	CU_ASSERT(strncmp(ctrlr.trid.traddr, "192.168.100.9", SPDK_NVMF_TRADDR_MAX_LEN) == 0);
2382 	CU_ASSERT(strncmp(ctrlr.trid.trsvcid, "4421", SPDK_NVMF_TRSVCID_MAX_LEN) == 0);
2383 
2384 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
2385 }
2386 
2387 static void
2388 test_nvme_ctrlr_init_set_nvmf_ioccsz(void)
2389 {
2390 	struct spdk_nvme_ctrlr_data cdata = {};
2391 	DECLARE_AND_CONSTRUCT_CTRLR();
2392 	/* ioccsz is in 16-byte units and includes the 64-byte SQE: 260 * 16 - 64 = 4096 bytes */
2393 	cdata.nvmf_specific.ioccsz = 260;
2394 	cdata.nvmf_specific.icdoff = 1;
2395 	g_cdata = &cdata;
2396 
2397 	/* Check the PCIe trtype; ioccsz/icdoff do not apply */
2398 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2399 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2400 
2401 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2402 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2403 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2404 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2405 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2406 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2407 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2408 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2409 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2410 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2411 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2412 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2413 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2414 
2415 	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
2416 	CU_ASSERT(ctrlr.icdoff == 0);
2417 
2418 	nvme_ctrlr_destruct(&ctrlr);
2419 
2420 	/* Check the RDMA trtype */
2421 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2422 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2423 
2424 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2425 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2426 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2427 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2428 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2429 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2430 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2431 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2432 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2433 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2434 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2435 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2436 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2437 
2438 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2439 	CU_ASSERT(ctrlr.icdoff == 1);
2440 	ctrlr.ioccsz_bytes = 0;
2441 	ctrlr.icdoff = 0;
2442 
2443 	nvme_ctrlr_destruct(&ctrlr);
2444 
2445 	/* Check the TCP trtype */
2446 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2447 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
2448 
2449 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2450 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2451 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2452 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2453 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2454 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2455 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2456 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2457 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2458 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2459 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2460 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2461 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2462 
2463 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2464 	CU_ASSERT(ctrlr.icdoff == 1);
2465 	ctrlr.ioccsz_bytes = 0;
2466 	ctrlr.icdoff = 0;
2467 
2468 	nvme_ctrlr_destruct(&ctrlr);
2469 
2470 	/* Check the FC trtype */
2471 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2472 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_FC;
2473 
2474 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2475 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2476 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2477 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2478 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2479 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2480 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2481 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2482 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2483 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2484 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2485 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2486 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2487 
2488 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2489 	CU_ASSERT(ctrlr.icdoff == 1);
2490 	ctrlr.ioccsz_bytes = 0;
2491 	ctrlr.icdoff = 0;
2492 
2493 	nvme_ctrlr_destruct(&ctrlr);
2494 
2495 	/* Check the CUSTOM trtype; ioccsz/icdoff do not apply */
2496 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2497 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;
2498 
2499 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2500 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2501 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2502 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2503 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2504 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2505 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2506 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2507 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2508 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2509 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2510 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2511 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2512 
2513 	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
2514 	CU_ASSERT(ctrlr.icdoff == 0);
2515 
2516 	nvme_ctrlr_destruct(&ctrlr);
2517 
2518 	/* Check the CUSTOM_FABRICS trtype */
2519 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2520 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM_FABRICS;
2521 
2522 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2523 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2524 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2525 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2526 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2527 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2528 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2529 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2530 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2531 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2532 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2533 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2534 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2535 
2536 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2537 	CU_ASSERT(ctrlr.icdoff == 1);
2538 	ctrlr.ioccsz_bytes = 0;
2539 	ctrlr.icdoff = 0;
2540 
2541 	nvme_ctrlr_destruct(&ctrlr);
2542 
2543 	g_cdata = NULL;
2544 }
2545 
2546 static void
2547 test_nvme_ctrlr_init_set_num_queues(void)
2548 {
2549 	DECLARE_AND_CONSTRUCT_CTRLR();
2550 
2551 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2552 
2553 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2554 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2555 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2556 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2557 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2558 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2559 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2560 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2561 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2562 
2563 	ctrlr.opts.num_io_queues = 64;
2564 	/* Num queues is zero-based. So, use 31 to get 32 queues */
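	/* The completion's cdw0 carries NSQA in bits 15:0 and NCQA in bits 31:16. */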
2565 	fake_cpl.cdw0 = 31 + (31 << 16);
2566 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_ACTIVE_NS */
2567 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2568 	CU_ASSERT(ctrlr.opts.num_io_queues == 32);
2569 	fake_cpl.cdw0 = 0;
2570 
2571 	nvme_ctrlr_destruct(&ctrlr);
2572 }
2573 
2574 static void
2575 test_nvme_ctrlr_init_set_keep_alive_timeout(void)
2576 {
2577 	DECLARE_AND_CONSTRUCT_CTRLR();
2578 
2579 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2580 
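	/* Get Features (Keep Alive Timer) reports the timer in ms via cdw0; the
	 * driver is expected to adopt the controller-reported value. */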
2581 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2582 	ctrlr.cdata.kas = 1;
2583 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2584 	fake_cpl.cdw0 = 120000;
2585 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_IOCS_SPECIFIC */
2586 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2587 	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 120000);
2588 	fake_cpl.cdw0 = 0;
2589 
2590 	/* Target does not support Get Feature "Keep Alive Timer" */
2591 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2592 	ctrlr.cdata.kas = 1;
2593 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2594 	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
2595 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_IOCS_SPECIFIC */
2596 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2597 	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 60000);
2598 	set_status_code = SPDK_NVME_SC_SUCCESS;
2599 
2600 	/* Target fails Get Feature "Keep Alive Timer" for another reason */
2601 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2602 	ctrlr.cdata.kas = 1;
2603 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2604 	set_status_code = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2605 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> ERROR */
2606 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
2607 	set_status_code = SPDK_NVME_SC_SUCCESS;
2608 
2609 	nvme_ctrlr_destruct(&ctrlr);
2610 }
2611 
2612 static void
2613 test_alloc_io_qpair_fail(void)
2614 {
2615 	struct spdk_nvme_ctrlr ctrlr = {};
2616 	struct spdk_nvme_qpair *q0;
2617 
2618 	setup_qpairs(&ctrlr, 1);
2619 
2620 	/* Modify the connect_qpair return code to inject a failure */
2621 	g_connect_qpair_return_code = 1;
2622 
2623 	/* Attempt to allocate a qpair, this should fail */
2624 	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
2625 	SPDK_CU_ASSERT_FATAL(q0 == NULL);
2626 
2627 	/* Verify that the qpair is removed from the lists */
2628 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.active_io_qpairs));
2629 
2630 	g_connect_qpair_return_code = 0;
2631 	cleanup_qpairs(&ctrlr);
2632 }
2633 
2634 static void
2635 test_nvme_ctrlr_add_remove_process(void)
2636 {
2637 	struct spdk_nvme_ctrlr ctrlr = {};
2638 	void *devhandle = (void *)0xDEADBEEF;
2639 	struct spdk_nvme_ctrlr_process *proc = NULL;
2640 	int rc;
2641 
2642 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2643 	TAILQ_INIT(&ctrlr.active_procs);
2644 
2645 	rc = nvme_ctrlr_add_process(&ctrlr, devhandle);
2646 	CU_ASSERT(rc == 0);
2647 	proc = TAILQ_FIRST(&ctrlr.active_procs);
2648 	SPDK_CU_ASSERT_FATAL(proc != NULL);
2649 	CU_ASSERT(proc->is_primary == true);
2650 	CU_ASSERT(proc->pid == getpid());
2651 	CU_ASSERT(proc->devhandle == (void *)0xDEADBEEF);
2652 	CU_ASSERT(proc->ref == 0);
2653 
2654 	nvme_ctrlr_remove_process(&ctrlr, proc);
2655 	CU_ASSERT(TAILQ_EMPTY(&ctrlr.active_procs));
2656 }
2657 
2658 static void
2659 test_nvme_ctrlr_set_arbitration_feature(void)
2660 {
2661 	struct spdk_nvme_ctrlr ctrlr = {};
2662 
2663 	ctrlr.opts.arbitration_burst = 6;
2664 	ctrlr.flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
2665 	ctrlr.opts.low_priority_weight = 1;
2666 	ctrlr.opts.medium_priority_weight = 2;
2667 	ctrlr.opts.high_priority_weight = 3;
2668 	/* g_ut_cdw11 records the cdw11 value of the Set Features command. */
2669 	g_ut_cdw11 = 0;
2670 
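	/* Arbitration feature cdw11 layout: AB in bits 2:0, LPW in bits 15:8,
	 * MPW in bits 23:16, HPW in bits 31:24 (per the NVMe spec). */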
2671 	/* arbitration_burst 6 fits the 3-bit AB field, so the feature is set. */
2672 	nvme_ctrlr_set_arbitration_feature(&ctrlr);
2673 	CU_ASSERT((uint8_t)g_ut_cdw11 == 6);
2674 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 8) == 1);
2675 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 16) == 2);
2676 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 24) == 3);
2677 
2678 	/* arbitration_burst 8 exceeds the 3-bit AB field, so the feature is skipped. */
2679 	g_ut_cdw11 = 0;
2680 	ctrlr.opts.arbitration_burst = 8;
2681 
2682 	nvme_ctrlr_set_arbitration_feature(&ctrlr);
2683 	CU_ASSERT(g_ut_cdw11 == 0);
2684 }
2685 
2686 static void
2687 test_nvme_ctrlr_set_state(void)
2688 {
2689 	struct spdk_nvme_ctrlr ctrlr = {};
2690 	MOCK_SET(spdk_get_ticks, 0);
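
	/* nvme_ctrlr_set_state() converts the ms timeout to ticks; the test env
	 * stubs the tick rate at 1 MHz, so 1000 ms is expected to be 1000000 ticks. */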
2691 
2692 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 1000);
2693 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2694 	CU_ASSERT(ctrlr.state_timeout_tsc == 1000000);
2695 
2696 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 0);
2697 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2698 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
2699 
2700 	/* The timeout tick computation overflows; fall back to an infinite timeout. */
2701 	MOCK_SET(spdk_get_ticks, UINT64_MAX);
2702 
2703 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 1000);
2704 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2705 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
2706 	MOCK_CLEAR(spdk_get_ticks);
2707 }
2708 
2709 static void
2710 test_nvme_ctrlr_active_ns_list_v0(void)
2711 {
2712 	DECLARE_AND_CONSTRUCT_CTRLR();
2713 
2714 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2715 
2716 	ctrlr.vs.bits.mjr = 1;
2717 	ctrlr.vs.bits.mnr = 0;
2718 	ctrlr.vs.bits.ter = 0;
2719 	ctrlr.cdata.nn = 1024;
2720 
2721 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2722 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2723 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2724 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2725 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2726 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2727 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2728 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 1024);
2729 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2730 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2731 
2732 	nvme_ctrlr_destruct(&ctrlr);
2733 }
2734 
2735 static void
2736 test_nvme_ctrlr_active_ns_list_v2(void)
2737 {
2738 	uint32_t i;
2739 	uint32_t active_ns_list[1024];
2740 	DECLARE_AND_CONSTRUCT_CTRLR();
2741 
2742 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2743 
2744 	ctrlr.vs.bits.mjr = 1;
2745 	ctrlr.vs.bits.mnr = 2;
2746 	ctrlr.vs.bits.ter = 0;
2747 	ctrlr.cdata.nn = 4096;
2748 
2749 	g_active_ns_list = active_ns_list;
2750 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2751 
2752 	/* No active namespaces */
2753 	memset(active_ns_list, 0, sizeof(active_ns_list));
2754 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2755 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2756 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2757 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2758 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2759 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2760 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
2761 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2762 
2763 	nvme_ctrlr_destruct(&ctrlr);
2764 
2765 	/* 1024 active namespaces - one full page */
2766 	memset(active_ns_list, 0, sizeof(active_ns_list));
2767 	for (i = 0; i < 1024; ++i) {
2768 		active_ns_list[i] = i + 1;
2769 	}
2770 
2771 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2772 
2773 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2774 	g_active_ns_list = active_ns_list;
2775 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2776 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2777 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2778 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2779 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2780 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2781 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2782 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 1024);
2783 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2784 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2785 
2786 	nvme_ctrlr_destruct(&ctrlr);
2787 
2788 	/* 1023 active namespaces - full page minus one */
2789 	memset(active_ns_list, 0, sizeof(active_ns_list));
2790 	for (i = 0; i < 1023; ++i) {
2791 		active_ns_list[i] = i + 1;
2792 	}
2793 
2794 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2795 
2796 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2797 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2798 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2799 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2800 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1023));
2801 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2802 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2803 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2804 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 0);
2805 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2806 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2807 
2808 	nvme_ctrlr_destruct(&ctrlr);
2809 
2810 	g_active_ns_list = NULL;
2811 	g_active_ns_list_length = 0;
2812 }
2813 
2814 static void
2815 test_nvme_ctrlr_ns_mgmt(void)
2816 {
2817 	DECLARE_AND_CONSTRUCT_CTRLR();
2818 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2819 	uint32_t active_ns_list2[] = { 1, 2, 3, 100, 1024 };
2820 	struct spdk_nvme_ns_data nsdata = {};
2821 	struct spdk_nvme_ctrlr_list ctrlr_list = {};
2822 	uint32_t nsid;
2823 
2824 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2825 
2826 	ctrlr.vs.bits.mjr = 1;
2827 	ctrlr.vs.bits.mnr = 2;
2828 	ctrlr.vs.bits.ter = 0;
2829 	ctrlr.cdata.nn = 4096;
2830 
2831 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2832 	g_active_ns_list = active_ns_list;
2833 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2834 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2835 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2836 	}
2837 
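	/* NS Management (create) returns the new NSID in cdw0 of the completion. */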
2838 	fake_cpl.cdw0 = 3;
2839 	nsid = spdk_nvme_ctrlr_create_ns(&ctrlr, &nsdata);
2840 	fake_cpl.cdw0 = 0;
2841 	CU_ASSERT(nsid == 3);
2842 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2843 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2844 
2845 	g_active_ns_list = active_ns_list2;
2846 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
2847 	CU_ASSERT(spdk_nvme_ctrlr_attach_ns(&ctrlr, 3, &ctrlr_list) == 0);
2848 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2849 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2850 
2851 	g_active_ns_list = active_ns_list;
2852 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2853 	CU_ASSERT(spdk_nvme_ctrlr_detach_ns(&ctrlr, 3, &ctrlr_list) == 0);
2854 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2855 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2856 
2857 	CU_ASSERT(spdk_nvme_ctrlr_delete_ns(&ctrlr, 3) == 0);
2858 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2859 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2860 	g_active_ns_list = NULL;
2861 	g_active_ns_list_length = 0;
2862 
2863 	nvme_ctrlr_destruct(&ctrlr);
2864 }
2865 
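/* Register-write callback mimicking controller behavior: once CC.EN is set,
 * report CSTS.RDY = 1 so the enable/reset sequence can make progress. */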
2866 static void
2867 check_en_set_rdy(void)
2868 {
2869 	if (g_ut_nvme_regs.cc.bits.en == 1) {
2870 		g_ut_nvme_regs.csts.bits.rdy = 1;
2871 	}
2872 }
2873 
2874 static void
2875 test_nvme_ctrlr_reset(void)
2876 {
2877 	DECLARE_AND_CONSTRUCT_CTRLR();
2878 	struct spdk_nvme_ctrlr_data cdata = { .nn = 4096 };
2879 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2880 	uint32_t active_ns_list2[] = { 1, 100, 1024 };
2881 
2882 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2883 
2884 	g_ut_nvme_regs.vs.bits.mjr = 1;
2885 	g_ut_nvme_regs.vs.bits.mnr = 2;
2886 	g_ut_nvme_regs.vs.bits.ter = 0;
2887 	nvme_ctrlr_get_vs(&ctrlr, &ctrlr.vs);
2888 	ctrlr.cdata.nn = 2048;
2889 
2890 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2891 	g_active_ns_list = active_ns_list;
2892 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2893 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2894 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2895 	}
2896 	CU_ASSERT(spdk_nvme_ctrlr_get_num_ns(&ctrlr) == 2048);
2897 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 2) != NULL);
2898 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2));
2899 
2900 	/* Reset controller with changed number of namespaces */
2901 	g_cdata = &cdata;
2902 	g_active_ns_list = active_ns_list2;
2903 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
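	/* Replenish the admin queue's free request list so a request can be
	 * allocated during the reset sequence. */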
2904 	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);
2905 	g_ut_nvme_regs.cc.raw = 0;
2906 	g_ut_nvme_regs.csts.raw = 0;
2907 	g_set_reg_cb = check_en_set_rdy;
2908 	g_wait_for_completion_return_val = -ENXIO;
2909 	CU_ASSERT(spdk_nvme_ctrlr_reset(&ctrlr) == 0);
2910 	g_set_reg_cb = NULL;
2911 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);
2912 	g_cdata = NULL;
2913 	g_active_ns_list = NULL;
2914 	g_active_ns_list_length = 0;
2915 
2916 	CU_ASSERT(spdk_nvme_ctrlr_get_num_ns(&ctrlr) == 4096);
2917 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 2) != NULL);
2918 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2));
2919 
2920 	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
2921 	nvme_ctrlr_destruct(&ctrlr);
2922 
2923 	g_wait_for_completion_return_val = 0;
2924 }
2925 
2926 static uint32_t g_aer_cb_counter;
2927 
2928 static void
2929 aer_cb(void *aer_cb_arg, const struct spdk_nvme_cpl *cpl)
2930 {
2931 	g_aer_cb_counter++;
2932 }
2933 
2934 static void
2935 test_nvme_ctrlr_aer_callback(void)
2936 {
2937 	DECLARE_AND_CONSTRUCT_CTRLR();
2938 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
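	/* Build a Namespace Attribute Changed notice as the AER completion. */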
2939 	union spdk_nvme_async_event_completion	aer_event = {
2940 		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
2941 		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
2942 	};
2943 	struct spdk_nvme_cpl aer_cpl = {
2944 		.status.sct = SPDK_NVME_SCT_GENERIC,
2945 		.status.sc = SPDK_NVME_SC_SUCCESS,
2946 		.cdw0 = aer_event.raw
2947 	};
2948 
2949 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2950 
2951 	ctrlr.vs.bits.mjr = 1;
2952 	ctrlr.vs.bits.mnr = 2;
2953 	ctrlr.vs.bits.ter = 0;
2954 	ctrlr.cdata.nn = 4096;
2955 
2956 	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
2957 	g_active_ns_list = active_ns_list;
2958 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2959 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2960 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2961 	}
2962 
2963 	CU_ASSERT(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);
2964 	spdk_nvme_ctrlr_register_aer_callback(&ctrlr, aer_cb, NULL);
2965 
2966 	/* Async event */
2967 	g_aer_cb_counter = 0;
2968 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
2969 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
2970 	CU_ASSERT(g_aer_cb_counter == 1);
2971 	g_active_ns_list = NULL;
2972 	g_active_ns_list_length = 0;
2973 
2974 	nvme_ctrlr_free_processes(&ctrlr);
2975 	nvme_ctrlr_destruct(&ctrlr);
2976 }
2977 
2978 static void
2979 test_nvme_ctrlr_ns_attr_changed(void)
2980 {
2981 	DECLARE_AND_CONSTRUCT_CTRLR();
2982 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2983 	uint32_t active_ns_list2[] = { 1, 2, 1024 };
2984 	uint32_t active_ns_list3[] = { 1, 2, 101, 1024 };
2985 	union spdk_nvme_async_event_completion	aer_event = {
2986 		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
2987 		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
2988 	};
2989 	struct spdk_nvme_cpl aer_cpl = {
2990 		.status.sct = SPDK_NVME_SCT_GENERIC,
2991 		.status.sc = SPDK_NVME_SC_SUCCESS,
2992 		.cdw0 = aer_event.raw
2993 	};
2994 
2995 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2996 
2997 	ctrlr.vs.bits.mjr = 1;
2998 	ctrlr.vs.bits.mnr = 3;
2999 	ctrlr.vs.bits.ter = 0;
3000 	ctrlr.cap.bits.css |= SPDK_NVME_CAP_CSS_IOCS;
3001 	ctrlr.cdata.nn = 4096;
3002 
3003 	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
3004 	g_active_ns_list = active_ns_list;
3005 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
3006 
3007 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
3008 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
3009 	}
3010 
3011 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 100));
3012 
3013 	CU_ASSERT(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);
3014 	spdk_nvme_ctrlr_register_aer_callback(&ctrlr, aer_cb, NULL);
3015 
3016 	/* Remove NS 100 */
3017 	g_aer_cb_counter = 0;
3018 	g_active_ns_list = active_ns_list2;
3019 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
3020 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
3021 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
3022 	CU_ASSERT(g_aer_cb_counter == 1);
3023 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 100));
3024 
3025 	/* Add NS 101 */
3026 	g_active_ns_list = active_ns_list3;
3027 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list3);
3028 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
3029 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
3030 	CU_ASSERT(g_aer_cb_counter == 2);
3031 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 101));
3032 
3033 	g_active_ns_list = NULL;
3034 	g_active_ns_list_length = 0;
3035 	nvme_ctrlr_free_processes(&ctrlr);
3036 	nvme_ctrlr_destruct(&ctrlr);
3037 }
3038 
3039 static void
3040 test_nvme_ctrlr_identify_namespaces_iocs_specific_next(void)
3041 {
3042 	struct spdk_nvme_ctrlr ctrlr = {};
3043 	uint32_t prev_nsid;
3044 	struct spdk_nvme_ns ns[5] = {};
3045 	struct spdk_nvme_ctrlr ns_ctrlr[5] = {};
3046 	int rc = 0;
3047 	int i;
3048 
3049 	RB_INIT(&ctrlr.ns);
3050 	for (i = 0; i < 5; i++) {
3051 		ns[i].id = i + 1;
3052 		ns[i].active = true;
3053 	}
3054 
3055 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
3056 
3057 	ctrlr.cdata.nn = 5;
3058 	/* case 1: No first/next active NS, move on to the next state, expect: pass */
3059 	prev_nsid = 0;
3060 	ctrlr.active_ns_count = 0;
3061 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3062 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3063 	CU_ASSERT(rc == 0);
3064 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES);
3065 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3066 
3067 	/* case 2: move on to the next active NS; no namespace with (supported) IOCS-specific data is found, expect: pass */
3068 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3069 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3070 	prev_nsid = 1;
3071 	for (i = 0; i < 5; i++) {
3072 		RB_INSERT(nvme_ns_tree, &ctrlr.ns, &ns[i]);
3073 	}
3074 	ctrlr.active_ns_count = 5;
3075 	ns[1].csi = SPDK_NVME_CSI_NVM;
3076 	ns[1].id = 2;
3077 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3078 	CU_ASSERT(rc == 0);
3079 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES);
3080 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3081 
3082 	/* case 3: the last NS has csi SPDK_NVME_CSI_ZNS; iteration stops there and issues the IOCS-specific identify, expect: pass */
3083 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3084 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3085 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3086 	prev_nsid = 0;
3087 	ctrlr.active_ns_count = 5;
3088 
3089 	for (i = 0; i < 5; i++) {
3090 		ns[i].csi = SPDK_NVME_CSI_NVM;
3091 		ns[i].id = i + 1;
3092 		ns[i].ctrlr = &ns_ctrlr[i];
3093 	}
3094 	ns[4].csi = SPDK_NVME_CSI_ZNS;
3095 	ns_ctrlr[4].opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3096 
3097 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3098 	CU_ASSERT(rc == 0);
3099 	CU_ASSERT(ctrlr.state == 0);
3100 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3101 	CU_ASSERT(ns_ctrlr[4].state == NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC);
3102 	CU_ASSERT(ns_ctrlr[4].state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3103 
3104 	for (i = 0; i < 5; i++) {
3105 		nvme_ns_free_zns_specific_data(&ns[i]);
3106 	}
3107 
3108 	/* case 4: nvme_ctrlr_identify_ns_iocs_specific_async returns 1, expect: failure */
3109 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3110 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3111 	prev_nsid = 1;
3112 	ctrlr.active_ns_count = 5;
3113 	ns[1].csi = SPDK_NVME_CSI_ZNS;
3114 	g_fail_next_identify = true;
3115 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3116 	CU_ASSERT(rc == 1);
3117 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
3118 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3119 
3120 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
3121 }
3122 
3123 static void
3124 test_nvme_ctrlr_set_supported_log_pages(void)
3125 {
3126 	int rc;
3127 	struct spdk_nvme_ctrlr ctrlr = {};
3128 
3129 	/* ANA reporting supported */
3130 	memset(&ctrlr, 0, sizeof(ctrlr));
3131 	ctrlr.cdata.cmic.ana_reporting = true;
3132 	ctrlr.cdata.lpa.celp = 1;
3133 	ctrlr.cdata.nanagrpid = 1;
3134 	ctrlr.active_ns_count = 1;
3135 
3136 	rc = nvme_ctrlr_set_supported_log_pages(&ctrlr);
3137 	CU_ASSERT(rc == 0);
3138 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ERROR] == true);
3139 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] == true);
3140 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] == true);
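	/* ANA log size = header + one descriptor per ANA group (nanagrpid) + one
	 * NSID entry per active namespace. */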
3141 	CU_ASSERT(ctrlr.ana_log_page_size == sizeof(struct spdk_nvme_ana_page) +
3142 		  sizeof(struct spdk_nvme_ana_group_descriptor) * 1 + sizeof(uint32_t) * 1);
3143 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS] == true);
3144 	free(ctrlr.ana_log_page);
3145 	free(ctrlr.copied_ana_desc);
3146 }
3147 
3148 static void
3149 test_nvme_ctrlr_set_intel_supported_log_pages(void)
3150 {
3151 	DECLARE_AND_CONSTRUCT_CTRLR();
3152 
3153 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
3154 
3155 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3156 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
3157 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
3158 	ctrlr.state = NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES;
3159 
3160 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
3161 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES);
3162 
3163 	set_status_code = SPDK_NVME_SC_SUCCESS;
3164 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
3165 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES);
3166 
3167 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ERROR] == true);
3168 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] == true);
3169 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] == true);
3170 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] == true);
3171 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] == true);
3172 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] == true);
3173 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_SMART] == true);
3174 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] == true);
3175 
3176 	nvme_ctrlr_destruct(&ctrlr);
3177 }
3178 
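/* One ANA group descriptor followed by a single NSID entry. */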
3179 #define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
3180 				 sizeof(uint32_t))
3181 static void
3182 test_nvme_ctrlr_parse_ana_log_page(void)
3183 {
3184 	int rc, i;
3185 	struct spdk_nvme_ctrlr ctrlr = {};
3186 	struct spdk_nvme_ns ns[3] = {};
3187 	struct spdk_nvme_ana_page ana_hdr;
3188 	char _ana_desc[UT_ANA_DESC_SIZE];
3189 	struct spdk_nvme_ana_group_descriptor *ana_desc;
3190 	uint32_t offset;
3191 
3192 	RB_INIT(&ctrlr.ns);
3193 	for (i = 0; i < 3; i++) {
3194 		ns[i].id = i + 1;
3195 		ns[i].active = true;
3196 		RB_INSERT(nvme_ns_tree, &ctrlr.ns, &ns[i]);
3197 	}
3198 
3199 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
3200 
3201 	ctrlr.cdata.nn = 3;
3202 	ctrlr.cdata.nanagrpid = 3;
3203 	ctrlr.active_ns_count = 3;
3204 
3205 	rc = nvme_ctrlr_update_ana_log_page(&ctrlr);
3206 	CU_ASSERT(rc == 0);
3207 	CU_ASSERT(ctrlr.ana_log_page != NULL);
3208 	CU_ASSERT(ctrlr.copied_ana_desc != NULL);
3209 
3210 	/*
3211 	 * Create ANA log page data - There are three ANA groups.
3212 	 * Each ANA group has a namespace and has a different ANA state.
3213 	 */
3214 	memset(&ana_hdr, 0, sizeof(ana_hdr));
3215 	ana_hdr.num_ana_group_desc = 3;
3216 
3217 	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= ctrlr.ana_log_page_size);
3218 	memcpy((char *)ctrlr.ana_log_page, (char *)&ana_hdr, sizeof(ana_hdr));
3219 	offset = sizeof(ana_hdr);
3220 
3221 	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;
3222 	memset(ana_desc, 0, UT_ANA_DESC_SIZE);
3223 	ana_desc->num_of_nsid = 1;
3224 
	ana_desc->ana_group_id = 1;
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->nsid[0] = 3;

	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);
	offset += UT_ANA_DESC_SIZE;

	ana_desc->ana_group_id = 2;
	ana_desc->ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ana_desc->nsid[0] = 2;

	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);
	offset += UT_ANA_DESC_SIZE;

	ana_desc->ana_group_id = 3;
	ana_desc->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ana_desc->nsid[0] = 1;

	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);

	/* Parse the created ANA log page data, and update ANA states. */
	rc = nvme_ctrlr_parse_ana_log_page(&ctrlr, nvme_ctrlr_update_ns_ana_states,
					   &ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ns[0].ana_group_id == 3);
	CU_ASSERT(ns[0].ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(ns[1].ana_group_id == 2);
	CU_ASSERT(ns[1].ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(ns[2].ana_group_id == 1);
	CU_ASSERT(ns[2].ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);

	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);

	free(ctrlr.ana_log_page);
	free(ctrlr.copied_ana_desc);
}

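/*
 * Simulate a controller whose active namespace count grows at runtime and
 * verify that the ANA log page handling resizes along with it: reach READY
 * with two namespaces, then deliver namespace-attribute-changed and
 * ANA-change async events to pick up two more.
 */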
static void
test_nvme_ctrlr_ana_resize(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();
	uint32_t active_ns_list[] = { 1, 2, 3, 4 };
	struct spdk_nvme_ana_page ana_hdr = {
		.change_count = 0,
		.num_ana_group_desc = 1
	};
	uint8_t ana_desc_buf[sizeof(struct spdk_nvme_ana_group_descriptor) + 4 * sizeof(uint32_t)] = {};
	struct spdk_nvme_ana_group_descriptor *ana_desc =
		(struct spdk_nvme_ana_group_descriptor *)ana_desc_buf;
	struct spdk_nvme_ns *ns;
	union spdk_nvme_async_event_completion aer_event = {
		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
	};
	struct spdk_nvme_cpl aer_cpl = {
		.status.sct = SPDK_NVME_SCT_GENERIC,
		.status.sc = SPDK_NVME_SC_SUCCESS,
		.cdw0 = aer_event.raw
	};
	uint32_t i;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);

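	/* Advertise an NVMe 1.4.0 controller with ANA reporting enabled so
	 * that init fetches and tracks the ANA log page.
	 */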
	ctrlr.vs.bits.mjr = 1;
	ctrlr.vs.bits.mnr = 4;
	ctrlr.vs.bits.ter = 0;
	ctrlr.cdata.nn = 4096;
	ctrlr.cdata.cmic.ana_reporting = true;
	ctrlr.cdata.nanagrpid = 1;

	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
	/* Start with 2 active namespaces */
	g_active_ns_list = active_ns_list;
	g_active_ns_list_length = 2;
	g_ana_hdr = &ana_hdr;
	g_ana_descs = &ana_desc;
	ana_desc->ana_group_id = 1;
	ana_desc->ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ana_desc->num_of_nsid = 2;
	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ana_desc->nsid[i] = i + 1;
	}

	/* Bring controller to ready state */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	}

	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ns = spdk_nvme_ctrlr_get_ns(&ctrlr, i + 1);
		CU_ASSERT(ns->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	}

	/* Grow the active namespace list to 4 via a namespace-attribute-changed AEN. */
	g_active_ns_list_length = 4;
	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
	nvme_ctrlr_complete_queued_async_events(&ctrlr);

	/* Update ANA log with new namespaces */
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->num_of_nsid = 4;
	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ana_desc->nsid[i] = i + 1;
	}
	aer_event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
	aer_cpl.cdw0 = aer_event.raw;
	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
	nvme_ctrlr_complete_queued_async_events(&ctrlr);

	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ns = spdk_nvme_ctrlr_get_ns(&ctrlr, i + 1);
		CU_ASSERT(ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	}

	g_active_ns_list = NULL;
	g_active_ns_list_length = 0;
	g_ana_hdr = NULL;
	g_ana_descs = NULL;
	nvme_ctrlr_free_processes(&ctrlr);
	nvme_ctrlr_destruct(&ctrlr);
}

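/*
 * spdk_nvme_ctrlr_get_memory_domains() should pass straight through to the
 * transport callback; exercise it with the mock returning 1 and then 0.
 */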
static void
test_nvme_ctrlr_get_memory_domains(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	MOCK_SET(nvme_transport_ctrlr_get_memory_domains, 1);
	CU_ASSERT(spdk_nvme_ctrlr_get_memory_domains(&ctrlr, NULL, 0) == 1);

	MOCK_SET(nvme_transport_ctrlr_get_memory_domains, 0);
	CU_ASSERT(spdk_nvme_ctrlr_get_memory_domains(&ctrlr, NULL, 0) == 0);

	MOCK_CLEAR(nvme_transport_ctrlr_get_memory_domains);
}

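/*
 * TRANSPORT_READY is the final init state: a zero return from
 * nvme_transport_ctrlr_ready() should move the controller to READY, while a
 * negative return should fail init and leave it in ERROR.
 */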
static void
test_nvme_transport_ctrlr_ready(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	/* Transport init succeeded */
	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	/* Transport init failed */
	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	MOCK_SET(nvme_transport_ctrlr_ready, -1);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == -1);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
	MOCK_CLEAR(nvme_transport_ctrlr_ready);
}

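/*
 * Step the disable path of a Controller Level Reset by hand against the
 * mocked register file g_ut_nvme_regs: after CC.EN is cleared, the poller
 * must keep returning -EAGAIN until CSTS.RDY also drops to 0.
 */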
static void
test_nvme_ctrlr_disable(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();
	int rc;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	/* Start a Controller Level Reset. */
	ctrlr.is_disconnecting = true;
	nvme_ctrlr_disable(&ctrlr);

	g_ut_nvme_regs.cc.bits.en = 0;

	rc = nvme_ctrlr_disable_poll(&ctrlr);
	CU_ASSERT(rc == -EAGAIN);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	g_ut_nvme_regs.csts.bits.rdy = 0;

	rc = nvme_ctrlr_disable_poll(&ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

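	/* Report shutdown complete so that the shutdown-notification wait in
	 * destruct finishes immediately.
	 */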
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

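/* Register every test in a single CUnit suite and run it through the shared
 * spdk_ut_run_tests() harness.
 */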
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_ctrlr", NULL, NULL);

	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_1);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_1);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_rr);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_vs);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_delay);
	CU_ADD_TEST(suite, test_alloc_io_qpair_rr_1);
	CU_ADD_TEST(suite, test_ctrlr_get_default_ctrlr_opts);
	CU_ADD_TEST(suite, test_ctrlr_get_default_io_qpair_opts);
	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_1);
	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_2);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_update_firmware);
	CU_ADD_TEST(suite, test_nvme_ctrlr_fail);
	CU_ADD_TEST(suite, test_nvme_ctrlr_construct_intel_support_log_page_list);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_features);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_host_feature);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_doorbell_buffer_config);
#if 0 /* TODO: move to PCIe-specific unit test */
	CU_ADD_TEST(suite, test_nvme_ctrlr_alloc_cmb);
#endif
	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns);
	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns_error_case);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_reconnect_io_qpair);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_set_trid);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_nvmf_ioccsz);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_num_queues);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_keep_alive_timeout);
	CU_ADD_TEST(suite, test_alloc_io_qpair_fail);
	CU_ADD_TEST(suite, test_nvme_ctrlr_add_remove_process);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_arbitration_feature);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_state);
	CU_ADD_TEST(suite, test_nvme_ctrlr_active_ns_list_v0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_active_ns_list_v2);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ns_mgmt);
	CU_ADD_TEST(suite, test_nvme_ctrlr_reset);
	CU_ADD_TEST(suite, test_nvme_ctrlr_aer_callback);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ns_attr_changed);
	CU_ADD_TEST(suite, test_nvme_ctrlr_identify_namespaces_iocs_specific_next);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_log_pages);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_intel_supported_log_pages);
	CU_ADD_TEST(suite, test_nvme_ctrlr_parse_ana_log_page);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ana_resize);
	CU_ADD_TEST(suite, test_nvme_ctrlr_get_memory_domains);
	CU_ADD_TEST(suite, test_nvme_transport_ctrlr_ready);
	CU_ADD_TEST(suite, test_nvme_ctrlr_disable);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}