/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation. All rights reserved.
 *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "spdk/log.h"

#include "common/lib/test_env.c"

#include "nvme/nvme_ctrlr.c"
#include "nvme/nvme_quirks.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

pid_t g_spdk_nvme_pid;

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;

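/*
 * In-memory shadow of the controller's register file. The
 * nvme_transport_ctrlr_set_reg_*() / get_reg_*() stubs below read and write
 * this structure, so tests can observe register writes (e.g. CC.EN) and
 * inject register state (e.g. CSTS.RDY). g_set_reg_cb, when non-NULL, is
 * invoked on every register write so a test can react at the write point.
 */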
struct spdk_nvme_registers g_ut_nvme_regs = {};
typedef void (*set_reg_cb)(void);
set_reg_cb g_set_reg_cb;

__thread int    nvme_thread_ioq_index = -1;

uint32_t set_size = 1;

int set_status_cpl = -1;

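/*
 * Stubs for controller dependencies that are irrelevant to the logic under
 * test; they either return success or do nothing.
 */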
DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
	    (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_set_id_desc_list_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_free_iocs_specific_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_qpair_abort_all_queued_reqs, (struct spdk_nvme_qpair *qpair));
DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
		struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_io_msg_ctrlr_update, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB(nvme_io_msg_process, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(nvme_transport_ctrlr_reserve_cmb, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_receive, int, (struct spdk_nvme_ctrlr *ctrlr,
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_send, int, (struct spdk_nvme_ctrlr *ctrlr,
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_qpair_abort_queued_reqs, (struct spdk_nvme_qpair *qpair));

DEFINE_RETURN_MOCK(nvme_transport_ctrlr_get_memory_domains, int);
int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	HANDLE_RETURN_MOCK(nvme_transport_ctrlr_get_memory_domains);

	return 0;
}

DEFINE_RETURN_MOCK(nvme_transport_ctrlr_ready, int);
int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	HANDLE_RETURN_MOCK(nvme_transport_ctrlr_ready);
	return 0;
}

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	return NULL;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct_finish(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

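/*
 * Register accessors: route all controller register reads and writes through
 * the g_ut_nvme_regs shadow declared above, bounds-checked against the size
 * of the register file.
 */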
int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	if (g_set_reg_cb) {
		g_set_reg_cb();
	}
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	if (g_set_reg_cb) {
		g_set_reg_cb();
	}
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

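/*
 * The async register accessors complete synchronously in this test env:
 * they touch the shadow registers and immediately invoke the callback with
 * a fabricated successful completion.
 */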
int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_set_reg_4(ctrlr, offset, value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_set_reg_8(ctrlr, offset, value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};
	uint32_t value;

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_get_reg_4(ctrlr, offset, &value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};
	uint64_t value;

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	nvme_transport_ctrlr_get_reg_8(ctrlr, offset, &value);
	cb_fn(cb_arg, value, &cpl);
	return 0;
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return UINT32_MAX;
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	return 1;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	qpair->ctrlr = ctrlr;
	qpair->id = qid;
	qpair->qprio = opts->qprio;

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);
}

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
}

int
nvme_driver_init(void)
{
	return 0;
}

int
nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr,
		enum spdk_nvme_qprio qprio,
		uint32_t num_requests, bool async)
{
	qpair->id = id;
	qpair->qprio = qprio;
	qpair->ctrlr = ctrlr;
	qpair->async = async;

	return 0;
}

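/*
 * fake_cpl_sc() completes an admin command inline, using the status code a
 * test selected via set_status_code, so init state-machine steps that wait
 * on an admin completion can advance without a real queue.
 */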
static struct spdk_nvme_cpl fake_cpl = {};
static enum spdk_nvme_generic_command_status_code set_status_code = SPDK_NVME_SC_SUCCESS;

static void
fake_cpl_sc(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl.status.sc = set_status_code;
	cb_fn(cb_arg, &fake_cpl);
}

static uint32_t g_ut_cdw11;

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	g_ut_cdw11 = cdw11;
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

struct spdk_nvme_ana_page *g_ana_hdr;
struct spdk_nvme_ana_group_descriptor **g_ana_descs;

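/*
 * Fabricates log page payloads: an ANA page assembled from g_ana_hdr and
 * g_ana_descs when a test has provided them, and an Intel log page directory
 * advertising every optional page, so the supported-log-page logic can be
 * exercised.
 */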
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) && g_ana_hdr) {
		uint32_t i;
		uint8_t *ptr = payload;

		memset(payload, 0, payload_size);
		memcpy(ptr, g_ana_hdr, sizeof(*g_ana_hdr));
		ptr += sizeof(*g_ana_hdr);
		for (i = 0; i < g_ana_hdr->num_ana_group_desc; ++i) {
			uint32_t desc_size = sizeof(**g_ana_descs) +
					     g_ana_descs[i]->num_of_nsid * sizeof(uint32_t);
			memcpy(ptr, g_ana_descs[i], desc_size);
			ptr += desc_size;
		}
	} else if (log_page == SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY) {
		struct spdk_nvme_intel_log_page_directory *log_page_directory = payload;

		log_page_directory->read_latency_log_len = true;
		log_page_directory->write_latency_log_len = true;
		log_page_directory->temperature_statistics_log_len = true;
		log_page_directory->smart_log_len = true;
		log_page_directory->marketing_description_log_len = true;
	}

	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10, uint32_t cdw11,
				     uint32_t cdw14, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);
	STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);

	/*
	 * For the purposes of this unit test, we don't need to bother emulating request submission.
	 */

	return 0;
}

static int32_t g_wait_for_completion_return_val;

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return g_wait_for_completion_return_val;
}

void
nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/* This should not happen in the test env, since this callback is always
	 * called before wait_for_completion_*, while this field can only be set
	 * to true in the wait_for_completion_* functions. */
	CU_ASSERT(status->timed_out == false);

	status->cpl = *cpl;
	status->done = true;
}

static struct nvme_completion_poll_status *g_failed_status;

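/*
 * Simplified stand-in for the real polling loop: a single call to
 * spdk_nvme_qpair_process_completions() decides success or failure, and
 * set_status_cpl == 1 lets a test force an error completion status.
 */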
int
nvme_wait_for_completion_robust_lock_timeout(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex,
	uint64_t timeout_in_usecs)
{
	if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
		g_failed_status = status;
		status->timed_out = true;
		return -1;
	}

	status->done = true;
	if (set_status_cpl == 1) {
		status->cpl.status.sc = 1;
	}
	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
}

int
nvme_wait_for_completion_robust_lock(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, robust_mutex, 0);
}

int
nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			 struct nvme_completion_poll_status *status)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, 0);
}

int
nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				 struct nvme_completion_poll_status *status,
				 uint64_t timeout_in_usecs)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, timeout_in_usecs);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

static uint32_t *g_active_ns_list = NULL;
static uint32_t g_active_ns_list_length = 0;
static struct spdk_nvme_ctrlr_data *g_cdata = NULL;
static bool g_fail_next_identify = false;

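/*
 * Identify stub. For the active namespace list CNS it emulates the paging
 * semantics of the real command: only NSIDs greater than the NSID carried in
 * the command are returned, up to the capacity of one spdk_nvme_ns_list. The
 * list comes from g_active_ns_list when a test has set it, otherwise from
 * 1..cdata.nn. g_fail_next_identify makes a single call fail.
 */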
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (g_fail_next_identify) {
		g_fail_next_identify = false;
		return 1;
	}

	memset(payload, 0, payload_size);
	if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
		uint32_t count = 0;
		uint32_t i = 0;
		struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;

		if (g_active_ns_list == NULL) {
			for (i = 1; i <= ctrlr->cdata.nn; i++) {
				if (i <= nsid) {
					continue;
				}

				ns_list->ns_list[count++] = i;
				if (count == SPDK_COUNTOF(ns_list->ns_list)) {
					break;
				}
			}
		} else {
			for (i = 0; i < g_active_ns_list_length; i++) {
				uint32_t cur_nsid = g_active_ns_list[i];
				if (cur_nsid <= nsid) {
					continue;
				}

				ns_list->ns_list[count++] = cur_nsid;
				if (count == SPDK_COUNTOF(ns_list->ns_list)) {
					break;
				}
			}
		}
	} else if (cns == SPDK_NVME_IDENTIFY_CTRLR) {
		if (g_cdata) {
			memcpy(payload, g_cdata, sizeof(*g_cdata));
		}
	} else if (cns == SPDK_NVME_IDENTIFY_NS_IOCS) {
		return 0;
	}

	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				   uint32_t doper, uint32_t dtype, uint32_t dspec,
				   void *payload, uint32_t payload_size, uint32_t cdw12,
				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				      uint32_t doper, uint32_t dtype, uint32_t dspec,
				      void *payload, uint32_t payload_size, uint32_t cdw12,
				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
	if (fw_commit->fs == 0) {
		return -1;
	}
	set_status_cpl = 1;
	if (ctrlr->is_resetting == true) {
		set_status_cpl = 0;
	}
	return 0;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
		return -1;
	}
	CU_ASSERT(offset == 0);
	return 0;
}

bool
nvme_ns_has_supported_iocs_specific_data(struct spdk_nvme_ns *ns)
{
	switch (ns->csi) {
	case SPDK_NVME_CSI_NVM:
		/*
		 * NVM Command Set Specific Identify Namespace data structure
		 * is currently all-zeroes, reserved for future use.
		 */
		return false;
	case SPDK_NVME_CSI_ZNS:
		return true;
	default:
		SPDK_WARNLOG("Unsupported CSI: %u for NSID: %u\n", ns->csi, ns->id);
		return false;
	}
}

void
nvme_ns_free_zns_specific_data(struct spdk_nvme_ns *ns)
{
	if (!ns->id) {
		return;
	}

	if (ns->nsdata_zns) {
		spdk_free(ns->nsdata_zns);
		ns->nsdata_zns = NULL;
	}
}

void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

void
spdk_pci_device_detach(struct spdk_pci_device *device)
{
}

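/*
 * Builds a ctrlr on the stack with a one-request admin queue, using the
 * custom transport type so nvme_ctrlr_process_init() exercises the generic
 * init state machine against the fake registers above.
 */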
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;					\
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;

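/*
 * The test_nvme_ctrlr_init_en_*_rdy_* cases below walk the controller init
 * state machine one nvme_ctrlr_process_init() step at a time, flipping
 * CSTS.RDY in the shadow registers to emulate the device and asserting the
 * expected state after each step.
 */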
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_EN_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_SET_EN_0) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Default round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = 0x0;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to default round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Weighted round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to weighted round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Vendor specific arbitration mechanism supported
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to vendor specific arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 1
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	}
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

	/*
	 * Start enabling the controller.
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

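/*
 * setup_qpairs() fakes a READY controller with num_io_queues free I/O queue
 * IDs; bit 0 of free_io_qids stays cleared because QID 0 belongs to the
 * admin queue.
 */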
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
	uint32_t i;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);

	ctrlr->page_size = 0x1000;
	ctrlr->opts.num_io_queues = num_io_queues;
	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	ctrlr->state = NVME_CTRLR_STATE_READY;
	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);

	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
}

static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct(ctrlr);
}

static void
test_alloc_io_qpair_rr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0;

	setup_qpairs(&ctrlr, 1);

	/*
	 * Fake to simulate the controller with the default round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	/* Only 1 I/O qpair was allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpair has been returned to the free list,
	 * we should be able to allocate it again.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/* Only qprio 0 is acceptable for the default round robin arbitration mechanism */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 3;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	/* Only qprio values 0 ~ 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
	opts.qprio = 0;

	/* An I/O qpair can only be created when the ctrlr is in the READY state */
	ctrlr.state = NVME_CTRLR_STATE_ENABLE;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);
	ctrlr.state = NVME_CTRLR_STATE_READY;

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1;

	setup_qpairs(&ctrlr, 2);

	/*
	 * Fake to simulate the controller with the weighted round robin
	 * arbitration mechanism.
	 */
	ctrlr.process_init_cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	/*
	 * Allocate 2 qpairs and free them
	 */
	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Allocate 2 qpairs and free them in the reverse order
	 */
	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 2);

	opts.qprio = 3;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);

	/* Only qprio values 0 ~ 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_2(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1, *q2, *q3;

	setup_qpairs(&ctrlr, 4);

	/*
	 * Fake to simulate the controller with the weighted round robin
	 * arbitration mechanism.
	 */
	ctrlr.process_init_cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);

	opts.qprio = 2;
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 2);

	opts.qprio = 3;
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/* Only 4 I/O qpairs were allocated, so this should fail */
	opts.qprio = 0;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpairs have been returned to the free list,
	 * we should be able to allocate them again.
	 *
	 * Allocate 4 I/O qpairs, with pairs of them sharing the same qprio.
	 */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 1);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);

	opts.qprio = 3;
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 3);

	opts.qprio = 3;
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/*
	 * Free all I/O qpairs
	 */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);

	cleanup_qpairs(&ctrlr);
}

1750 bool g_connect_qpair_called = false;
1751 int g_connect_qpair_return_code = 0;
1752 int
1753 nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1754 {
1755 	g_connect_qpair_called = true;
1756 	qpair->state = NVME_QPAIR_CONNECTED;
1757 	return g_connect_qpair_return_code;
1758 }
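
/*
 * Transport stub: pretend that connecting always reaches the CONNECTED state
 * and record that the transport was invoked, so tests can assert whether the
 * reconnect path actually called down into the transport. The return code is
 * injectable through g_connect_qpair_return_code (see
 * test_alloc_io_qpair_fail() below).
 */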
1759 
1760 static void
1761 test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
1762 {
1763 	struct spdk_nvme_ctrlr	ctrlr = {};
1764 	struct spdk_nvme_qpair	qpair = {};
1765 	int rc;
1766 
1767 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
1768 
1769 	/* Various states of controller disconnect. */
1770 	qpair.id = 1;
1771 	qpair.ctrlr = &ctrlr;
1772 	ctrlr.is_removed = 1;
1773 	ctrlr.is_failed = 0;
1774 	ctrlr.is_resetting = 0;
1775 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1776 	CU_ASSERT(rc == -ENODEV);
1777 
1778 	ctrlr.is_removed = 0;
1779 	ctrlr.is_failed = 1;
1780 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1781 	CU_ASSERT(rc == -ENXIO);
1782 
1783 	ctrlr.is_failed = 0;
1784 	ctrlr.is_resetting = 1;
1785 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1786 	CU_ASSERT(rc == -EAGAIN);
1787 
1788 	/* Confirm precedence for controller states: removed > resetting > failed */
1789 	ctrlr.is_removed = 1;
1790 	ctrlr.is_failed = 1;
1791 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1792 	CU_ASSERT(rc == -ENODEV);
1793 
1794 	ctrlr.is_removed = 0;
1795 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1796 	CU_ASSERT(rc == -EAGAIN);
1797 
1798 	ctrlr.is_resetting = 0;
1799 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1800 	CU_ASSERT(rc == -ENXIO);
1801 
1802 	/* The qpair is not failed, so make sure we don't call down to the transport */
1803 	ctrlr.is_failed = 0;
1804 	qpair.state = NVME_QPAIR_CONNECTED;
1805 	g_connect_qpair_called = false;
1806 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1807 	CU_ASSERT(g_connect_qpair_called == false);
1808 	CU_ASSERT(rc == 0);
1809 
1810 	/* The transport qpair is failed, so make sure we call down to the transport */
1811 	qpair.state = NVME_QPAIR_DISCONNECTED;
1812 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
1813 	CU_ASSERT(g_connect_qpair_called == true);
1814 	CU_ASSERT(rc == 0);
1815 
1816 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
1817 }
1818 
1819 static void
1820 test_nvme_ctrlr_fail(void)
1821 {
1822 	struct spdk_nvme_ctrlr	ctrlr = {};
1823 
1824 	ctrlr.opts.num_io_queues = 0;
1825 	nvme_ctrlr_fail(&ctrlr, false);
1826 
1827 	CU_ASSERT(ctrlr.is_failed == true);
1828 }
1829 
1830 static void
1831 test_nvme_ctrlr_construct_intel_support_log_page_list(void)
1832 {
1833 	bool	res;
1834 	struct spdk_nvme_ctrlr				ctrlr = {};
1835 	struct spdk_nvme_intel_log_page_directory	payload = {};
1836 	struct spdk_pci_id				pci_id = {};
1837 
1838 	/* Get quirks for a device with all 0 vendor/device id */
1839 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1840 	CU_ASSERT(ctrlr.quirks == 0);
1841 
1842 	/* Set the vendor to Intel, but provide no device id */
1843 	pci_id.class_id = SPDK_PCI_CLASS_NVME;
1844 	ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
1845 	payload.temperature_statistics_log_len = 1;
1846 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1847 	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
1848 
1849 	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
1850 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
1851 	CU_ASSERT(res == true);
1852 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
1853 	CU_ASSERT(res == true);
1854 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
1855 	CU_ASSERT(res == false);
1856 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
1857 	CU_ASSERT(res == false);
1858 
1859 	/* Set a valid vendor ID, device ID and subdevice ID */
1860 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
1861 	payload.temperature_statistics_log_len = 0;
1862 	pci_id.vendor_id = SPDK_PCI_VID_INTEL;
1863 	pci_id.device_id = 0x0953;
1864 	pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
1865 	pci_id.subdevice_id = 0x3702;
1866 	ctrlr.quirks = nvme_get_quirks(&pci_id);
1867 	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
1868 
1869 	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
1870 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
1871 	CU_ASSERT(res == true);
1872 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
1873 	CU_ASSERT(res == false);
1874 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
1875 	CU_ASSERT(res == true);
1876 	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
1877 	CU_ASSERT(res == false);
1878 }
1879 
1880 static void
1881 test_nvme_ctrlr_set_supported_features(void)
1882 {
1883 	bool	res;
1884 	struct spdk_nvme_ctrlr			ctrlr = {};
1885 
1886 	/* Set an invalid vendor ID */
1887 	ctrlr.cdata.vid = 0xFFFF;
1888 	nvme_ctrlr_set_supported_features(&ctrlr);
1889 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
1890 	CU_ASSERT(res == true);
1891 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
1892 	CU_ASSERT(res == false);
1893 
1894 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
1895 	nvme_ctrlr_set_supported_features(&ctrlr);
1896 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
1897 	CU_ASSERT(res == true);
1898 	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
1899 	CU_ASSERT(res == true);
1900 }
1901 
1902 static void
1903 test_ctrlr_get_default_ctrlr_opts(void)
1904 {
1905 	struct spdk_nvme_ctrlr_opts opts = {};
1906 
1907 	CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id,
1908 				  "e53e9258-c93b-48b5-be1a-f025af6d232a") == 0);
1909 
1910 	memset(&opts, 0, sizeof(opts));
1911 
1912 	/* set an opts_size smaller than sizeof(opts) */
1913 	CU_ASSERT(sizeof(opts) > 8);
1914 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
1915 	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
1916 	CU_ASSERT_FALSE(opts.use_cmb_sqs);
1917 	/* check that the fields below are not initialized to default values */
1918 	CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
1919 	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
1920 	CU_ASSERT_EQUAL(opts.io_queue_size, 0);
1921 	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
1922 	for (int i = 0; i < 8; i++) {
1923 		CU_ASSERT(opts.host_id[i] == 0);
1924 	}
1925 	for (int i = 0; i < 16; i++) {
1926 		CU_ASSERT(opts.extended_host_id[i] == 0);
1927 	}
1928 	CU_ASSERT(strlen(opts.hostnqn) == 0);
1929 	CU_ASSERT(strlen(opts.src_addr) == 0);
1930 	CU_ASSERT(strlen(opts.src_svcid) == 0);
1931 	CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);
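
	/*
	 * The asserts above rely on the opts_size-based ABI compatibility
	 * pattern: the defaults routine only writes a field if it fits
	 * entirely within the caller-provided size. A minimal sketch of the
	 * FIELD_OK-style check used in nvme_ctrlr.c (field name illustrative):
	 *
	 *   #define FIELD_OK(field) \
	 *           (offsetof(struct spdk_nvme_ctrlr_opts, field) + \
	 *            sizeof(opts->field) <= opts_size)
	 *
	 *   if (FIELD_OK(num_io_queues)) {
	 *           opts->num_io_queues = DEFAULT_MAX_IO_QUEUES;
	 *   }
	 */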
1932 
1933 	/* set an opts_size matching sizeof(opts) */
1934 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
1935 	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
1936 	CU_ASSERT_FALSE(opts.use_cmb_sqs);
1937 	CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
1938 	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
1939 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
1940 	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
1941 	for (int i = 0; i < 8; i++) {
1942 		CU_ASSERT(opts.host_id[i] == 0);
1943 	}
1944 	CU_ASSERT_STRING_EQUAL(opts.hostnqn,
1945 			       "nqn.2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
1946 	CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
1947 			 sizeof(opts.extended_host_id)) == 0);
1948 	CU_ASSERT(strlen(opts.src_addr) == 0);
1949 	CU_ASSERT(strlen(opts.src_svcid) == 0);
1950 	CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
1951 }
1952 
1953 static void
1954 test_ctrlr_get_default_io_qpair_opts(void)
1955 {
1956 	struct spdk_nvme_ctrlr ctrlr = {};
1957 	struct spdk_nvme_io_qpair_opts opts = {};
1958 
1959 	memset(&opts, 0, sizeof(opts));
1960 
1961 	/* set an opts_size smaller than sizeof(opts) */
1962 	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
1963 	CU_ASSERT(sizeof(opts) > 8);
1964 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
1965 	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
1966 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
1967 	/* check that the field below is not initialized to a default value */
1968 	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
1969 
1970 	/* set an opts_size matching sizeof(opts) */
1971 	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
1972 	ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
1973 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
1974 	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
1975 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
1976 	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
1977 }
1978 
1979 #if 0 /* TODO: move to PCIe-specific unit test */
1980 static void
1981 test_nvme_ctrlr_alloc_cmb(void)
1982 {
1983 	int			rc;
1984 	uint64_t		offset;
1985 	struct spdk_nvme_ctrlr	ctrlr = {};
1986 
1987 	ctrlr.cmb_size = 0x1000000;
1988 	ctrlr.cmb_current_offset = 0x100;
1989 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
1990 	CU_ASSERT(rc == 0);
1991 	CU_ASSERT(offset == 0x1000);
1992 	CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);
1993 
1994 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
1995 	CU_ASSERT(rc == 0);
1996 	CU_ASSERT(offset == 0x2000);
1997 	CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);
1998 
1999 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
2000 	CU_ASSERT(rc == 0);
2001 	CU_ASSERT(offset == 0x100000);
2002 	CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);
2003 
2004 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
2005 	CU_ASSERT(rc == -1);
2006 }
2007 #endif
2008 
2009 static void
2010 test_spdk_nvme_ctrlr_update_firmware(void)
2011 {
2012 	struct spdk_nvme_ctrlr ctrlr = {};
2013 	void *payload = NULL;
2014 	int point_payload = 1;
2015 	int slot = 0;
2016 	int ret = 0;
2017 	struct spdk_nvme_status status;
2018 	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;
2019 
2020 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
2021 
2022 	/* Use an invalid size (not a multiple of 4 bytes) so the size check fails */
2023 	set_size = 5;
2024 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2025 	CU_ASSERT(ret == -1);
2026 
2027 	/* Payload is NULL and set_size < min_page_size */
2028 	set_size = 4;
2029 	ctrlr.min_page_size = 5;
2030 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2031 	CU_ASSERT(ret == -1);
2032 
2033 	/* Payload is not NULL but min_page_size is 0 */
2034 	set_size = 4;
2035 	ctrlr.min_page_size = 0;
2036 	payload = &point_payload;
2037 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2038 	CU_ASSERT(ret == -1);
2039 
2040 	/* Check firmware image download when payload is not NULL and min_page_size is not 0, with status.cpl set to 1 */
2041 	set_status_cpl = 1;
2042 	set_size = 4;
2043 	ctrlr.min_page_size = 5;
2044 	payload = &point_payload;
2045 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2046 	CU_ASSERT(ret == -ENXIO);
2047 
2048 	/* Check firmware image download with status.cpl set to 0 */
2049 	set_status_cpl = 0;
2050 	set_size = 4;
2051 	ctrlr.min_page_size = 5;
2052 	payload = &point_payload;
2053 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2054 	CU_ASSERT(ret == -1);
2055 
2056 	/* Check firmware commit */
2057 	ctrlr.is_resetting = false;
2058 	set_status_cpl = 0;
2059 	slot = 1;
2060 	set_size = 4;
2061 	ctrlr.min_page_size = 5;
2062 	payload = &point_payload;
2063 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2064 	CU_ASSERT(ret == -ENXIO);
2065 
2066 	/* Firmware download and commit succeed when the controller is already resetting */
2067 	ctrlr.is_resetting = true;
2068 	set_status_cpl = 0;
2069 	slot = 1;
2070 	set_size = 4;
2071 	ctrlr.min_page_size = 5;
2072 	payload = &point_payload;
2073 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2074 	CU_ASSERT(ret == 0);
2075 
2076 	/* nvme_wait_for_completion returns an error */
2077 	g_wait_for_completion_return_val = -1;
2078 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
2079 	CU_ASSERT(ret == -ENXIO);
2080 	CU_ASSERT(g_failed_status != NULL);
2081 	CU_ASSERT(g_failed_status->timed_out == true);
2082 	/* The status should be freed by the callback, which is not triggered in the
2083 	   test env. Store the status in a global variable and free it manually.
2084 	   If spdk_nvme_ctrlr_update_firmware changes its behavior and frees the status
2085 	   itself, we'll get a double free here. */
2086 	free(g_failed_status);
2087 	g_failed_status = NULL;
2088 	g_wait_for_completion_return_val = 0;
2089 
2090 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
2091 	set_status_cpl = 0;
2092 }
2093 
2094 int
2095 nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
2096 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
2097 {
2098 	fake_cpl_sc(cb_fn, cb_arg);
2099 	return 0;
2100 }
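
/*
 * The stub above completes the Doorbell Buffer Config command immediately.
 * On real PCIe controllers, prp1/prp2 point at two page-aligned buffers (the
 * shadow doorbell and EventIdx arrays) that the host updates in place of
 * doorbell register writes; the test below only exercises the allocate/free
 * path around that command.
 */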
2101 
2102 static void
2103 test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
2104 {
2105 	struct spdk_nvme_ctrlr ctrlr = {};
2106 	int ret = -1;
2107 
2108 	ctrlr.cdata.oacs.doorbell_buffer_config = 1;
2109 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2110 	ctrlr.page_size = 0x1000;
2111 	MOCK_CLEAR(spdk_malloc);
2112 	MOCK_CLEAR(spdk_zmalloc);
2113 	ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
2114 	CU_ASSERT(ret == 0);
2115 	nvme_ctrlr_free_doorbell_buffer(&ctrlr);
2116 }
2117 
2118 static void
2119 test_nvme_ctrlr_test_active_ns(void)
2120 {
2121 	uint32_t		nsid, minor;
2122 	size_t			ns_id_count;
2123 	struct spdk_nvme_ctrlr	ctrlr = {};
2124 	uint32_t		active_ns_list[1531];
2125 
2126 	for (nsid = 1; nsid <= 1531; nsid++) {
2127 		active_ns_list[nsid - 1] = nsid;
2128 	}
2129 
2130 	g_active_ns_list = active_ns_list;
2131 
2132 	ctrlr.page_size = 0x1000;
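	/*
	 * 1531 namespaces are used to exercise paging of the Identify Active
	 * Namespace ID List (CNS 0x02): each 4096-byte page holds 1024 4-byte
	 * NSIDs, so 1531 active namespaces span two pages (1024 + 507).
	 */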
2133 
2134 	for (minor = 0; minor <= 2; minor++) {
2135 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2136 		ctrlr.state = NVME_CTRLR_STATE_READY;
2137 
2138 		ctrlr.vs.bits.mjr = 1;
2139 		ctrlr.vs.bits.mnr = minor;
2140 		ctrlr.vs.bits.ter = 0;
2141 		ctrlr.cdata.nn = 1531;
2142 
2143 		RB_INIT(&ctrlr.ns);
2144 
2145 		g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2146 		nvme_ctrlr_identify_active_ns(&ctrlr);
2147 
2148 		for (nsid = 1; nsid <= ctrlr.cdata.nn; nsid++) {
2149 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
2150 		}
2151 
2152 		for (; nsid <= 1559; nsid++) {
2153 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
2154 		}
2155 
2156 		g_active_ns_list_length = 0;
2157 		if (minor <= 1) {
2158 			ctrlr.cdata.nn = 0;
2159 		}
2160 		nvme_ctrlr_identify_active_ns(&ctrlr);
2161 		CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
2162 
2163 		g_active_ns_list_length = 1;
2164 		if (minor <= 1) {
2165 			ctrlr.cdata.nn = 1;
2166 		}
2167 		nvme_ctrlr_identify_active_ns(&ctrlr);
2168 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
2169 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
2170 		nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
2171 		CU_ASSERT(nsid == 1);
2172 
2173 		if (minor >= 2) {
2174 			/* For NVMe 1.2 and newer, the namespace list can have "holes" where
2175 			 * some namespaces are not active. Test this. */
2176 			g_active_ns_list_length = 2;
2177 			g_active_ns_list[1] = 3;
2178 			nvme_ctrlr_identify_active_ns(&ctrlr);
2179 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
2180 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
2181 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
2182 			nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
2183 			CU_ASSERT(nsid == 3);
2184 			nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
2185 			CU_ASSERT(nsid == 0);
2186 
2187 			/* Reset the active namespace list array */
2188 			g_active_ns_list[1] = 2;
2189 		}
2190 
2191 		g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2192 		if (minor <= 1) {
2193 			ctrlr.cdata.nn = 1531;
2194 		}
2195 		nvme_ctrlr_identify_active_ns(&ctrlr);
2196 
2197 		ns_id_count = 0;
2198 		for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
2199 		     nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
2200 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
2201 			ns_id_count++;
2202 		}
2203 		CU_ASSERT(ns_id_count == ctrlr.cdata.nn);
2204 
2205 		nvme_ctrlr_destruct(&ctrlr);
2206 	}
2207 
2208 	g_active_ns_list = NULL;
2209 	g_active_ns_list_length = 0;
2210 }
2211 
2212 static void
2213 test_nvme_ctrlr_test_active_ns_error_case(void)
2214 {
2215 	int rc;
2216 	struct spdk_nvme_ctrlr	ctrlr = {.state = NVME_CTRLR_STATE_READY};
2217 
2218 	ctrlr.page_size = 0x1000;
2219 	ctrlr.vs.bits.mjr = 1;
2220 	ctrlr.vs.bits.mnr = 2;
2221 	ctrlr.vs.bits.ter = 0;
2222 	ctrlr.cdata.nn = 2;
2223 
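	/*
	 * On this NVMe 1.2 controller the active NS list comes from Identify
	 * CNS 0x02, so a failure there (injected via set_status_code) should
	 * surface as -ENXIO rather than falling back to cdata.nn.
	 */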
2224 	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
2225 	rc = nvme_ctrlr_identify_active_ns(&ctrlr);
2226 	CU_ASSERT(rc == -ENXIO);
2227 	set_status_code = SPDK_NVME_SC_SUCCESS;
2228 }
2229 
2230 static void
2231 test_nvme_ctrlr_init_delay(void)
2232 {
2233 	DECLARE_AND_CONSTRUCT_CTRLR();
2234 
2235 	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
2236 
2237 	/*
2238 	 * Initial state: CC.EN = 0, CSTS.RDY = 0
2239 	 * init() should set CC.EN = 1.
2240 	 */
2241 	g_ut_nvme_regs.cc.bits.en = 0;
2242 	g_ut_nvme_regs.csts.bits.rdy = 0;
2243 
2244 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2245 	/* Test that the initialization delay works correctly.  We only
2246 	 * do the initialization delay on SSDs that require it, so
2247 	 * set that quirk here.
2248 	 */
2249 	ctrlr.quirks = NVME_QUIRK_DELAY_BEFORE_INIT;
2250 	ctrlr.cdata.nn = 1;
2251 	ctrlr.page_size = 0x1000;
2252 	ctrlr.state = NVME_CTRLR_STATE_INIT_DELAY;
2253 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2254 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
2255 	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
2256 
2257 	/* delay 1s; just return since the sleep timeout hasn't elapsed yet */
2258 	spdk_delay_us(1 * spdk_get_ticks_hz());
2259 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2260 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
2261 	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
2262 
2263 	/* sleep timeout elapsed, start initialization */
2264 	spdk_delay_us(2 * spdk_get_ticks_hz());
2265 	while (ctrlr.state != NVME_CTRLR_STATE_CHECK_EN) {
2266 		CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2267 	}
2268 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2269 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
2270 
2271 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2272 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);
2273 
2274 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2275 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
2276 
2277 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2278 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
2279 	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
2280 
2281 	/*
2282 	 * Transition to CSTS.RDY = 1.
2283 	 */
2284 	g_ut_nvme_regs.csts.bits.rdy = 1;
2285 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2286 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
2287 
2288 	/*
2289 	 * Transition to READY.
2290 	 */
2291 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2292 		nvme_ctrlr_process_init(&ctrlr);
2293 	}
2294 
2295 	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
2296 	nvme_ctrlr_destruct(&ctrlr);
2297 }
2298 
2299 static void
2300 test_spdk_nvme_ctrlr_set_trid(void)
2301 {
2302 	struct spdk_nvme_ctrlr ctrlr = {{0}};
2303 	struct spdk_nvme_transport_id new_trid = {{0}};
2304 
2305 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
2306 
2307 	ctrlr.is_failed = false;
2308 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2309 	snprintf(ctrlr.trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
2310 	snprintf(ctrlr.trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
2311 	snprintf(ctrlr.trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
2312 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EPERM);
2313 
2314 	ctrlr.is_failed = true;
2315 	new_trid.trtype = SPDK_NVME_TRANSPORT_TCP;
2316 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
2317 	CU_ASSERT(ctrlr.trid.trtype == SPDK_NVME_TRANSPORT_RDMA);
2318 
2319 	new_trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2320 	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode2");
2321 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
2322 	CU_ASSERT(strncmp(ctrlr.trid.subnqn, "nqn.2016-06.io.spdk:cnode1", SPDK_NVMF_NQN_MAX_LEN) == 0);
2323 
2325 	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
2326 	snprintf(new_trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
2327 	snprintf(new_trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4421");
2328 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == 0);
2329 	CU_ASSERT(strncmp(ctrlr.trid.traddr, "192.168.100.9", SPDK_NVMF_TRADDR_MAX_LEN) == 0);
2330 	CU_ASSERT(strncmp(ctrlr.trid.trsvcid, "4421", SPDK_NVMF_TRSVCID_MAX_LEN) == 0);
2331 
2332 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
2333 }
2334 
2335 static void
2336 test_nvme_ctrlr_init_set_nvmf_ioccsz(void)
2337 {
2338 	struct spdk_nvme_ctrlr_data cdata = {};
2339 	DECLARE_AND_CONSTRUCT_CTRLR();
2340 	/* ioccsz of 260 is equivalent to 4096 bytes of in-capsule data (see below) */
2341 	cdata.nvmf_specific.ioccsz = 260;
2342 	cdata.nvmf_specific.icdoff = 1;
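	/*
	 * ioccsz (I/O Queue Command Capsule Supported Size) is expressed in
	 * 16-byte units, so 260 describes a 260 * 16 = 4160-byte capsule.
	 * Subtracting the 64-byte SQE leaves the in-capsule data size that the
	 * fabrics cases below expect:
	 *
	 *   260 * 16 - sizeof(struct spdk_nvme_cmd) == 4160 - 64 == 4096
	 */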
2343 	g_cdata = &cdata;
2344 
2345 	/* Check PCIe trtype. */
2346 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2347 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2348 
2349 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2350 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2351 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2352 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2353 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2354 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2355 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2356 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2357 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2358 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2359 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2360 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2361 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2362 
2363 	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
2364 	CU_ASSERT(ctrlr.icdoff == 0);
2365 
2366 	nvme_ctrlr_destruct(&ctrlr);
2367 
2368 	/* Check RDMA trtype. */
2369 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2370 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2371 
2372 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2373 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2374 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2375 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2376 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2377 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2378 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2379 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2380 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2381 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2382 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2383 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2384 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2385 
2386 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2387 	CU_ASSERT(ctrlr.icdoff == 1);
2388 	ctrlr.ioccsz_bytes = 0;
2389 	ctrlr.icdoff = 0;
2390 
2391 	nvme_ctrlr_destruct(&ctrlr);
2392 
2393 	/* Check TCP trtype. */
2394 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2395 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
2396 
2397 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2398 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2399 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2400 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2401 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2402 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2403 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2404 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2405 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2406 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2407 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2408 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2409 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2410 
2411 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2412 	CU_ASSERT(ctrlr.icdoff == 1);
2413 	ctrlr.ioccsz_bytes = 0;
2414 	ctrlr.icdoff = 0;
2415 
2416 	nvme_ctrlr_destruct(&ctrlr);
2417 
2418 	/* Check FC trtype. */
2419 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2420 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_FC;
2421 
2422 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2423 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2424 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2425 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2426 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2427 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2428 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2429 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2430 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2431 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2432 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2433 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2434 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2435 
2436 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2437 	CU_ASSERT(ctrlr.icdoff == 1);
2438 	ctrlr.ioccsz_bytes = 0;
2439 	ctrlr.icdoff = 0;
2440 
2441 	nvme_ctrlr_destruct(&ctrlr);
2442 
2443 	/* Check CUSTOM trtype. */
2444 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2445 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;
2446 
2447 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2448 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2449 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2450 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2451 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2452 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2453 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2454 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2455 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2456 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2457 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2458 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2459 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2460 
2461 	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
2462 	CU_ASSERT(ctrlr.icdoff == 0);
2463 
2464 	nvme_ctrlr_destruct(&ctrlr);
2465 
2466 	/* Check CUSTOM_FABRICS trtype. */
2467 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2468 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM_FABRICS;
2469 
2470 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2471 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2472 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2473 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2474 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2475 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2476 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2477 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2478 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2479 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2480 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2481 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2482 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2483 
2484 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2485 	CU_ASSERT(ctrlr.icdoff == 1);
2486 	ctrlr.ioccsz_bytes = 0;
2487 	ctrlr.icdoff = 0;
2488 
2489 	nvme_ctrlr_destruct(&ctrlr);
2490 
2491 	g_cdata = NULL;
2492 }
2493 
2494 static void
2495 test_nvme_ctrlr_init_set_num_queues(void)
2496 {
2497 	DECLARE_AND_CONSTRUCT_CTRLR();
2498 
2499 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2500 
2501 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2502 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2503 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONFIGURE_AER);
2504 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2505 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2506 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2507 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2508 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2509 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2510 
2511 	ctrlr.opts.num_io_queues = 64;
2512 	/* Number of queues is zero-based, so use 31 to get 32 queues */
2513 	fake_cpl.cdw0 = 31 + (31 << 16);
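	/*
	 * Set Features "Number of Queues" reports the allocated counts in the
	 * completion: NSQA (submission queues) in cdw0 bits 15:0 and NCQA
	 * (completion queues) in bits 31:16, both zero-based. The faked cdw0
	 * above therefore reports 32 SQs and 32 CQs.
	 */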
2514 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_ACTIVE_NS */
2515 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS);
2516 	CU_ASSERT(ctrlr.opts.num_io_queues == 32);
2517 	fake_cpl.cdw0 = 0;
2518 
2519 	nvme_ctrlr_destruct(&ctrlr);
2520 }
2521 
2522 static void
2523 test_nvme_ctrlr_init_set_keep_alive_timeout(void)
2524 {
2525 	DECLARE_AND_CONSTRUCT_CTRLR();
2526 
2527 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2528 
2529 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2530 	ctrlr.cdata.kas = 1;
2531 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2532 	fake_cpl.cdw0 = 120000;
2533 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_IOCS_SPECIFIC */
2534 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2535 	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 120000);
2536 	fake_cpl.cdw0 = 0;
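
	/*
	 * The driver issues Get Features "Keep Alive Timer" and adopts the
	 * controller-reported value from completion cdw0 (in ms), which is why
	 * the requested 60000 ms was replaced by the faked 120000 ms above.
	 */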
2537 
2538 	/* Target does not support Get Feature "Keep Alive Timer" */
2539 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2540 	ctrlr.cdata.kas = 1;
2541 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2542 	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
2543 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> IDENTIFY_IOCS_SPECIFIC */
2544 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2545 	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 60000);
2546 	set_status_code = SPDK_NVME_SC_SUCCESS;
2547 
2548 	/* Target fails Get Feature "Keep Alive Timer" for another reason */
2549 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2550 	ctrlr.cdata.kas = 1;
2551 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2552 	set_status_code = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2553 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> ERROR */
2554 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
2555 	set_status_code = SPDK_NVME_SC_SUCCESS;
2556 
2557 	nvme_ctrlr_destruct(&ctrlr);
2558 }
2559 
2560 static void
2561 test_alloc_io_qpair_fail(void)
2562 {
2563 	struct spdk_nvme_ctrlr ctrlr = {};
2564 	struct spdk_nvme_qpair *q0;
2565 
2566 	setup_qpairs(&ctrlr, 1);
2567 
2568 	/* Modify the connect_qpair return code to inject a failure */
2569 	g_connect_qpair_return_code = 1;
2570 
2571 	/* Attempt to allocate a qpair; this should fail */
2572 	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
2573 	SPDK_CU_ASSERT_FATAL(q0 == NULL);
2574 
2575 	/* Verify that the qpair is removed from the lists */
2576 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.active_io_qpairs));
2577 
2578 	g_connect_qpair_return_code = 0;
2579 	cleanup_qpairs(&ctrlr);
2580 }
2581 
2582 static void
2583 test_nvme_ctrlr_add_remove_process(void)
2584 {
2585 	struct spdk_nvme_ctrlr ctrlr = {};
2586 	void *devhandle = (void *)0xDEADBEEF;
2587 	struct spdk_nvme_ctrlr_process *proc = NULL;
2588 	int rc;
2589 
2590 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2591 	TAILQ_INIT(&ctrlr.active_procs);
2592 
2593 	rc = nvme_ctrlr_add_process(&ctrlr, devhandle);
2594 	CU_ASSERT(rc == 0);
2595 	proc = TAILQ_FIRST(&ctrlr.active_procs);
2596 	SPDK_CU_ASSERT_FATAL(proc != NULL);
2597 	CU_ASSERT(proc->is_primary == true);
2598 	CU_ASSERT(proc->pid == getpid());
2599 	CU_ASSERT(proc->devhandle == (void *)0xDEADBEEF);
2600 	CU_ASSERT(proc->ref == 0);
2601 
2602 	nvme_ctrlr_remove_process(&ctrlr, proc);
2603 	CU_ASSERT(TAILQ_EMPTY(&ctrlr.active_procs));
2604 }
2605 
2606 static void
2607 test_nvme_ctrlr_set_arbitration_feature(void)
2608 {
2609 	struct spdk_nvme_ctrlr ctrlr = {};
2610 
2611 	ctrlr.opts.arbitration_burst = 6;
2612 	ctrlr.flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
2613 	ctrlr.opts.low_priority_weight = 1;
2614 	ctrlr.opts.medium_priority_weight = 2;
2615 	ctrlr.opts.high_priority_weight = 3;
2616 	/* g_ut_cdw11 records the cdw11 value of the Set Features command. */
2617 	g_ut_cdw11 = 0;
2618 
2619 	/* arbitration_burst within the valid range (0-7). */
2620 	nvme_ctrlr_set_arbitration_feature(&ctrlr);
2621 	CU_ASSERT((uint8_t)g_ut_cdw11 == 6);
2622 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 8) == 1);
2623 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 16) == 2);
2624 	CU_ASSERT((uint8_t)(g_ut_cdw11 >> 24) == 3);
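
	/*
	 * The byte extractions above follow the Arbitration feature (FID 0x01)
	 * cdw11 layout: AB in bits 2:0, LPW in bits 15:8, MPW in bits 23:16
	 * and HPW in bits 31:24. A sketch of the encoding being verified:
	 *
	 *   cdw11 = (ab & 0x7) | (lpw << 8) | (mpw << 16) | (hpw << 24);
	 */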
2625 
2626 	/* arbitration_burst out of range, so the feature is not set. */
2627 	g_ut_cdw11 = 0;
2628 	ctrlr.opts.arbitration_burst = 8;
2629 
2630 	nvme_ctrlr_set_arbitration_feature(&ctrlr);
2631 	CU_ASSERT(g_ut_cdw11 == 0);
2632 }
2633 
2634 static void
2635 test_nvme_ctrlr_set_state(void)
2636 {
2637 	struct spdk_nvme_ctrlr ctrlr = {};
2638 	MOCK_SET(spdk_get_ticks, 0);
2639 
2640 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 1000);
2641 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2642 	CU_ASSERT(ctrlr.state_timeout_tsc == 1000000);
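
	/*
	 * The expected value follows from the stubbed clock: spdk_get_ticks()
	 * is mocked to 0 and the test env reports a 1 MHz tick rate, so
	 *
	 *   state_timeout_tsc = ticks + timeout_ms * ticks_hz / 1000
	 *                     = 0 + 1000 * 1000000 / 1000 = 1000000
	 */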
2643 
2644 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 0);
2645 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2646 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
2647 
2648 	/* Timeout ticks cause an integer overflow. */
2649 	MOCK_SET(spdk_get_ticks, UINT64_MAX);
2650 
2651 	nvme_ctrlr_set_state(&ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, 1000);
2652 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT);
2653 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
2654 	MOCK_CLEAR(spdk_get_ticks);
2655 }
2656 
2657 static void
2658 test_nvme_ctrlr_active_ns_list_v0(void)
2659 {
2660 	DECLARE_AND_CONSTRUCT_CTRLR();
2661 
2662 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2663 
2664 	ctrlr.vs.bits.mjr = 1;
2665 	ctrlr.vs.bits.mnr = 0;
2666 	ctrlr.vs.bits.ter = 0;
2667 	ctrlr.cdata.nn = 1024;
2668 
2669 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2670 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2671 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2672 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2673 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2674 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2675 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2676 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 1024);
2677 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2678 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2679 
2680 	nvme_ctrlr_destruct(&ctrlr);
2681 }
2682 
2683 static void
2684 test_nvme_ctrlr_active_ns_list_v2(void)
2685 {
2686 	uint32_t i;
2687 	uint32_t active_ns_list[1024];
2688 	DECLARE_AND_CONSTRUCT_CTRLR();
2689 
2690 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2691 
2692 	ctrlr.vs.bits.mjr = 1;
2693 	ctrlr.vs.bits.mnr = 2;
2694 	ctrlr.vs.bits.ter = 0;
2695 	ctrlr.cdata.nn = 4096;
2696 
2697 	g_active_ns_list = active_ns_list;
2698 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2699 
2700 	/* No active namespaces */
2701 	memset(active_ns_list, 0, sizeof(active_ns_list));
2702 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2703 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2704 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2705 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2706 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2707 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2708 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
2709 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2710 
2711 	nvme_ctrlr_destruct(&ctrlr);
2712 
2713 	/* 1024 active namespaces - one full page */
2714 	memset(active_ns_list, 0, sizeof(active_ns_list));
2715 	for (i = 0; i < 1024; ++i) {
2716 		active_ns_list[i] = i + 1;
2717 	}
2718 
2719 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2720 
2721 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2722 	g_active_ns_list = active_ns_list;
2723 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2724 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2725 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2726 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2727 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2728 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2729 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2730 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 1024);
2731 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2732 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2733 
2734 	nvme_ctrlr_destruct(&ctrlr);
2735 
2736 	/* 1023 active namespaces - one full page minus one */
2737 	memset(active_ns_list, 0, sizeof(active_ns_list));
2738 	for (i = 0; i < 1023; ++i) {
2739 		active_ns_list[i] = i + 1;
2740 	}
2741 
2742 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2743 
2744 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2745 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2746 	SPDK_CU_ASSERT_FATAL(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_NS);
2747 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1));
2748 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1023));
2749 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1024));
2750 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1025));
2751 	CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 1);
2752 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1023) == 0);
2753 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1024) == 0);
2754 	CU_ASSERT(spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, 1025) == 0);
2755 
2756 	nvme_ctrlr_destruct(&ctrlr);
2757 
2758 	g_active_ns_list = NULL;
2759 	g_active_ns_list_length = 0;
2760 }
2761 
2762 static void
2763 test_nvme_ctrlr_ns_mgmt(void)
2764 {
2765 	DECLARE_AND_CONSTRUCT_CTRLR();
2766 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2767 	uint32_t active_ns_list2[] = { 1, 2, 3, 100, 1024 };
2768 	struct spdk_nvme_ns_data nsdata = {};
2769 	struct spdk_nvme_ctrlr_list ctrlr_list = {};
2770 	uint32_t nsid;
2771 
2772 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2773 
2774 	ctrlr.vs.bits.mjr = 1;
2775 	ctrlr.vs.bits.mnr = 2;
2776 	ctrlr.vs.bits.ter = 0;
2777 	ctrlr.cdata.nn = 4096;
2778 
2779 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2780 	g_active_ns_list = active_ns_list;
2781 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2782 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2783 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2784 	}
2785 
2786 	fake_cpl.cdw0 = 3;
2787 	nsid = spdk_nvme_ctrlr_create_ns(&ctrlr, &nsdata);
2788 	fake_cpl.cdw0 = 0;
2789 	CU_ASSERT(nsid == 3);
2790 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2791 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
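
	/*
	 * A successful Namespace Management create returns the new NSID in
	 * completion dword 0, so faking cdw0 = 3 above makes
	 * spdk_nvme_ctrlr_create_ns() report NSID 3. The namespace does not
	 * become active until it is attached below.
	 */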
2792 
2793 	g_active_ns_list = active_ns_list2;
2794 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
2795 	CU_ASSERT(spdk_nvme_ctrlr_attach_ns(&ctrlr, 3, &ctrlr_list) == 0);
2796 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2797 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2798 
2799 	g_active_ns_list = active_ns_list;
2800 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2801 	CU_ASSERT(spdk_nvme_ctrlr_detach_ns(&ctrlr, 3, &ctrlr_list) == 0);
2802 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2803 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2804 
2805 	CU_ASSERT(spdk_nvme_ctrlr_delete_ns(&ctrlr, 3) == 0);
2806 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3));
2807 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 3) != NULL);
2808 	g_active_ns_list = NULL;
2809 	g_active_ns_list_length = 0;
2810 
2811 	nvme_ctrlr_destruct(&ctrlr);
2812 }
2813 
2814 static void
2815 check_en_set_rdy(void)
2816 {
2817 	if (g_ut_nvme_regs.cc.bits.en == 1) {
2818 		g_ut_nvme_regs.csts.bits.rdy = 1;
2819 	}
2820 }
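
/*
 * check_en_set_rdy() mimics controller hardware: as soon as the reset path
 * writes CC.EN = 1, CSTS.RDY is raised, which lets nvme_ctrlr_process_init()
 * walk the enable states without a real device. It is installed through
 * g_set_reg_cb in test_nvme_ctrlr_reset() below.
 */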
2821 
2822 static void
2823 test_nvme_ctrlr_reset(void)
2824 {
2825 	DECLARE_AND_CONSTRUCT_CTRLR();
2826 	struct spdk_nvme_ctrlr_data cdata = { .nn = 4096 };
2827 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2828 	uint32_t active_ns_list2[] = { 1, 100, 1024 };
2829 
2830 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2831 
2832 	g_ut_nvme_regs.vs.bits.mjr = 1;
2833 	g_ut_nvme_regs.vs.bits.mnr = 2;
2834 	g_ut_nvme_regs.vs.bits.ter = 0;
2835 	nvme_ctrlr_get_vs(&ctrlr, &ctrlr.vs);
2836 	ctrlr.cdata.nn = 2048;
2837 
2838 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS;
2839 	g_active_ns_list = active_ns_list;
2840 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2841 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2842 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2843 	}
2844 	CU_ASSERT(spdk_nvme_ctrlr_get_num_ns(&ctrlr) == 2048);
2845 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 2) != NULL);
2846 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2));
2847 
2848 	/* Reset controller with changed number of namespaces */
2849 	g_cdata = &cdata;
2850 	g_active_ns_list = active_ns_list2;
2851 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
2852 	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);
2853 	g_ut_nvme_regs.cc.raw = 0;
2854 	g_ut_nvme_regs.csts.raw = 0;
2855 	g_set_reg_cb = check_en_set_rdy;
2856 	g_wait_for_completion_return_val = -ENXIO;
2857 	CU_ASSERT(spdk_nvme_ctrlr_reset(&ctrlr) == 0);
2858 	g_set_reg_cb = NULL;
2859 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);
2860 	g_cdata = NULL;
2861 	g_active_ns_list = NULL;
2862 	g_active_ns_list_length = 0;
2863 
2864 	CU_ASSERT(spdk_nvme_ctrlr_get_num_ns(&ctrlr) == 4096);
2865 	CU_ASSERT(spdk_nvme_ctrlr_get_ns(&ctrlr, 2) != NULL);
2866 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2));
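
	/*
	 * The reset re-read the controller data (g_cdata) and the active NS
	 * list (active_ns_list2): nn grew from 2048 to 4096, and NSID 2
	 * dropped out of the active set while its ns structure remains
	 * allocated.
	 */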
2867 
2868 	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
2869 	nvme_ctrlr_destruct(&ctrlr);
2870 
2871 	g_wait_for_completion_return_val = 0;
2872 }
2873 
2874 static uint32_t g_aer_cb_counter;
2875 
2876 static void
2877 aer_cb(void *aer_cb_arg, const struct spdk_nvme_cpl *cpl)
2878 {
2879 	g_aer_cb_counter++;
2880 }
2881 
2882 static void
2883 test_nvme_ctrlr_aer_callback(void)
2884 {
2885 	DECLARE_AND_CONSTRUCT_CTRLR();
2886 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2887 	union spdk_nvme_async_event_completion	aer_event = {
2888 		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
2889 		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
2890 	};
2891 	struct spdk_nvme_cpl aer_cpl = {
2892 		.status.sct = SPDK_NVME_SCT_GENERIC,
2893 		.status.sc = SPDK_NVME_SC_SUCCESS,
2894 		.cdw0 = aer_event.raw
2895 	};
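
	/*
	 * For Asynchronous Event Requests the event itself is carried in the
	 * completion's cdw0: bits 2:0 hold the event type, bits 15:8 the event
	 * info and bits 23:16 the log page to read. The union above builds a
	 * "namespace attribute changed" notice accordingly.
	 */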
2896 
2897 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2898 
2899 	ctrlr.vs.bits.mjr = 1;
2900 	ctrlr.vs.bits.mnr = 2;
2901 	ctrlr.vs.bits.ter = 0;
2902 	ctrlr.cdata.nn = 4096;
2903 
2904 	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
2905 	g_active_ns_list = active_ns_list;
2906 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2907 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2908 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2909 	}
2910 
2911 	CU_ASSERT(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);
2912 	spdk_nvme_ctrlr_register_aer_callback(&ctrlr, aer_cb, NULL);
2913 
2914 	/* Async event */
2915 	g_aer_cb_counter = 0;
2916 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
2917 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
2918 	CU_ASSERT(g_aer_cb_counter == 1);
2919 	g_active_ns_list = NULL;
2920 	g_active_ns_list_length = 0;
2921 
2922 	nvme_ctrlr_free_processes(&ctrlr);
2923 	nvme_ctrlr_destruct(&ctrlr);
2924 }
2925 
2926 static void
2927 test_nvme_ctrlr_ns_attr_changed(void)
2928 {
2929 	DECLARE_AND_CONSTRUCT_CTRLR();
2930 	uint32_t active_ns_list[] = { 1, 2, 100, 1024 };
2931 	uint32_t active_ns_list2[] = { 1, 2, 1024 };
2932 	uint32_t active_ns_list3[] = { 1, 2, 101, 1024 };
2933 	union spdk_nvme_async_event_completion	aer_event = {
2934 		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
2935 		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
2936 	};
2937 	struct spdk_nvme_cpl aer_cpl = {
2938 		.status.sct = SPDK_NVME_SCT_GENERIC,
2939 		.status.sc = SPDK_NVME_SC_SUCCESS,
2940 		.cdw0 = aer_event.raw
2941 	};
2942 
2943 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
2944 
2945 	ctrlr.vs.bits.mjr = 1;
2946 	ctrlr.vs.bits.mnr = 3;
2947 	ctrlr.vs.bits.ter = 0;
2948 	ctrlr.cap.bits.css |= SPDK_NVME_CAP_CSS_IOCS;
2949 	ctrlr.cdata.nn = 4096;
2950 
2951 	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
2952 	g_active_ns_list = active_ns_list;
2953 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list);
2954 
2955 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
2956 		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
2957 	}
2958 
2959 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 100));
2960 
2961 	CU_ASSERT(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);
2962 	spdk_nvme_ctrlr_register_aer_callback(&ctrlr, aer_cb, NULL);
2963 
2964 	/* Remove NS 100 */
2965 	g_aer_cb_counter = 0;
2966 	g_active_ns_list = active_ns_list2;
2967 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list2);
2968 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
2969 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
2970 	CU_ASSERT(g_aer_cb_counter == 1);
2971 	CU_ASSERT(!spdk_nvme_ctrlr_is_active_ns(&ctrlr, 100));
2972 
2973 	/* Add NS 101 */
2974 	g_active_ns_list = active_ns_list3;
2975 	g_active_ns_list_length = SPDK_COUNTOF(active_ns_list3);
2976 	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
2977 	nvme_ctrlr_complete_queued_async_events(&ctrlr);
2978 	CU_ASSERT(g_aer_cb_counter == 2);
2979 	CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 101));
2980 
2981 	g_active_ns_list = NULL;
2982 	g_active_ns_list_length = 0;
2983 	nvme_ctrlr_free_processes(&ctrlr);
2984 	nvme_ctrlr_destruct(&ctrlr);
2985 }
2986 
2987 static void
2988 test_nvme_ctrlr_identify_namespaces_iocs_specific_next(void)
2989 {
2990 	struct spdk_nvme_ctrlr ctrlr = {};
2991 	uint32_t prev_nsid;
2992 	struct spdk_nvme_ns ns[5] = {};
2993 	struct spdk_nvme_ctrlr ns_ctrlr[5] = {};
2994 	int rc = 0;
2995 	int i;
2996 
2997 	RB_INIT(&ctrlr.ns);
2998 	for (i = 0; i < 5; i++) {
2999 		ns[i].id = i + 1;
3000 		ns[i].active = true;
3001 	}
3002 
3003 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
3004 
3005 	ctrlr.cdata.nn = 5;
3006 	/* case 1: No first/next active NS, move on to the next state, expect: pass */
3007 	prev_nsid = 0;
3008 	ctrlr.active_ns_count = 0;
3009 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3010 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3011 	CU_ASSERT(rc == 0);
3012 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES);
3013 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3014 
3015 	/* case 2: move on to the next active NS; no namespace with (supported) IOCS-specific data is found, expect: pass */
3016 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3017 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3018 	prev_nsid = 1;
3019 	for (i = 0; i < 5; i++) {
3020 		RB_INSERT(nvme_ns_tree, &ctrlr.ns, &ns[i]);
3021 	}
3022 	ctrlr.active_ns_count = 5;
3023 	ns[1].csi = SPDK_NVME_CSI_NVM;
3024 	ns[1].id = 2;
3025 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3026 	CU_ASSERT(rc == 0);
3027 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES);
3028 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3029 
3030 	/* case 3: ns.csi is SPDK_NVME_CSI_ZNS; stop at the ZNS namespace instead of looping past it, expect: pass */
3031 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3032 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3033 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3034 	prev_nsid = 0;
3035 	ctrlr.active_ns_count = 5;
3036 
3037 	for (int i = 0; i < 5; i++) {
3038 		ns[i].csi = SPDK_NVME_CSI_NVM;
3039 		ns[i].id = i + 1;
3040 		ns[i].ctrlr = &ns_ctrlr[i];
3041 	}
3042 	ns[4].csi = SPDK_NVME_CSI_ZNS;
3043 	ns_ctrlr[4].opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3044 
3045 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3046 	CU_ASSERT(rc == 0);
3047 	CU_ASSERT(ctrlr.state == 0);
3048 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3049 	CU_ASSERT(ns_ctrlr[4].state == NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC);
3050 	CU_ASSERT(ns_ctrlr[4].state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3051 
3052 	for (int i = 0; i < 5; i++) {
3053 		nvme_ns_free_zns_specific_data(&ns[i]);
3054 	}
3055 
3056 	/* case 4: nvme_ctrlr_identify_ns_iocs_specific_async returns 1, expect: failure */
3057 	memset(&ctrlr.state, 0x00, sizeof(ctrlr.state));
3058 	memset(&ctrlr.state_timeout_tsc, 0x00, sizeof(ctrlr.state_timeout_tsc));
3059 	prev_nsid = 1;
3060 	ctrlr.active_ns_count = 5;
3061 	ns[1].csi = SPDK_NVME_CSI_ZNS;
3062 	g_fail_next_identify = true;
3063 	rc = nvme_ctrlr_identify_namespaces_iocs_specific_next(&ctrlr, prev_nsid);
3064 	CU_ASSERT(rc == 1);
3065 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
3066 	CU_ASSERT(ctrlr.state_timeout_tsc == NVME_TIMEOUT_INFINITE);
3067 
3068 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
3069 }
3070 
3071 static void
3072 test_nvme_ctrlr_set_supported_log_pages(void)
3073 {
3074 	int rc;
3075 	struct spdk_nvme_ctrlr ctrlr = {};
3076 
3077 	/* ANA reporting supported */
3078 	memset(&ctrlr, 0, sizeof(ctrlr));
3079 	ctrlr.cdata.cmic.ana_reporting = true;
3080 	ctrlr.cdata.lpa.celp = 1;
3081 	ctrlr.cdata.nanagrpid = 1;
3082 	ctrlr.active_ns_count = 1;
3083 
3084 	rc = nvme_ctrlr_set_supported_log_pages(&ctrlr);
3085 	CU_ASSERT(rc == 0);
3086 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ERROR] == true);
3087 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] == true);
3088 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] == true);
3089 	CU_ASSERT(ctrlr.ana_log_page_size == sizeof(struct spdk_nvme_ana_page) +
3090 		  sizeof(struct spdk_nvme_ana_group_descriptor) * 1 + sizeof(uint32_t) * 1);
3091 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS] == true);
3092 	free(ctrlr.ana_log_page);
3093 	free(ctrlr.copied_ana_desc);
3094 }
3095 
3096 static void
3097 test_nvme_ctrlr_set_intel_supported_log_pages(void)
3098 {
3099 	DECLARE_AND_CONSTRUCT_CTRLR();
3100 
3101 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
3102 
3103 	ctrlr.opts.admin_timeout_ms = NVME_TIMEOUT_INFINITE;
3104 	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
3105 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
3106 	ctrlr.state = NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES;
3107 
3108 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
3109 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES);
3110 
3111 	set_status_code = SPDK_NVME_SC_SUCCESS;
3112 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
3113 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES);
3114 
3115 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_ERROR] == true);
3116 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] == true);
3117 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] == true);
3118 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] == true);
3119 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] == true);
3120 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] == true);
3121 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_LOG_SMART] == true);
3122 	CU_ASSERT(ctrlr.log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] == true);
3123 
3124 	nvme_ctrlr_destruct(&ctrlr);
3125 }
3126 
3127 #define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
3128 				 sizeof(uint32_t))
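
/*
 * An ANA group descriptor is a fixed-size header followed by num_of_nsid
 * 4-byte NSIDs, so UT_ANA_DESC_SIZE sizes a descriptor carrying exactly one
 * NSID, matching the log page constructed below.
 */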
3129 static void
3130 test_nvme_ctrlr_parse_ana_log_page(void)
3131 {
3132 	int rc, i;
3133 	struct spdk_nvme_ctrlr ctrlr = {};
3134 	struct spdk_nvme_ns ns[3] = {};
3135 	struct spdk_nvme_ana_page ana_hdr;
3136 	char _ana_desc[UT_ANA_DESC_SIZE];
3137 	struct spdk_nvme_ana_group_descriptor *ana_desc;
3138 	uint32_t offset;
3139 
3140 	RB_INIT(&ctrlr.ns);
3141 	for (i = 0; i < 3; i++) {
3142 		ns[i].id = i + 1;
3143 		ns[i].active = true;
3144 		RB_INSERT(nvme_ns_tree, &ctrlr.ns, &ns[i]);
3145 	}
3146 
3147 	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
3148 
3149 	ctrlr.cdata.nn = 3;
3150 	ctrlr.cdata.nanagrpid = 3;
3151 	ctrlr.active_ns_count = 3;
3152 
3153 	rc = nvme_ctrlr_update_ana_log_page(&ctrlr);
3154 	CU_ASSERT(rc == 0);
3155 	CU_ASSERT(ctrlr.ana_log_page != NULL);
3156 	CU_ASSERT(ctrlr.copied_ana_desc != NULL);
3157 
3158 	/*
3159 	 * Create ANA log page data - There are three ANA groups.
3160 	 * Each ANA group has a namespace and has a different ANA state.
3161 	 */
3162 	memset(&ana_hdr, 0, sizeof(ana_hdr));
3163 	ana_hdr.num_ana_group_desc = 3;
3164 
3165 	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= ctrlr.ana_log_page_size);
3166 	memcpy((char *)ctrlr.ana_log_page, (char *)&ana_hdr, sizeof(ana_hdr));
3167 	offset = sizeof(ana_hdr);
3168 
3169 	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;
3170 	memset(ana_desc, 0, UT_ANA_DESC_SIZE);
3171 	ana_desc->num_of_nsid = 1;
3172 
3173 	ana_desc->ana_group_id = 1;
3174 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
3175 	ana_desc->nsid[0] = 3;
3176 
3177 	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
3178 	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);
3179 	offset += UT_ANA_DESC_SIZE;
3180 
3181 	ana_desc->ana_group_id = 2;
3182 	ana_desc->ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
3183 	ana_desc->nsid[0] = 2;
3184 
3185 	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
3186 	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);
3187 	offset += UT_ANA_DESC_SIZE;
3188 
3189 	ana_desc->ana_group_id = 3;
3190 	ana_desc->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
3191 	ana_desc->nsid[0] = 1;
3192 
3193 	SPDK_CU_ASSERT_FATAL(offset + UT_ANA_DESC_SIZE <= ctrlr.ana_log_page_size);
3194 	memcpy((char *)ctrlr.ana_log_page + offset, (char *)ana_desc, UT_ANA_DESC_SIZE);
3195 
3196 	/* Parse the created ANA log page data, and update ANA states. */
3197 	rc = nvme_ctrlr_parse_ana_log_page(&ctrlr, nvme_ctrlr_update_ns_ana_states,
3198 					   &ctrlr);
3199 	CU_ASSERT(rc == 0);
3200 	CU_ASSERT(ns[0].ana_group_id == 3);
3201 	CU_ASSERT(ns[0].ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
3202 	CU_ASSERT(ns[1].ana_group_id == 2);
3203 	CU_ASSERT(ns[1].ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
3204 	CU_ASSERT(ns[2].ana_group_id == 1);
3205 	CU_ASSERT(ns[2].ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
3206 
3207 	CU_ASSERT(pthread_mutex_destroy(&ctrlr.ctrlr_lock) == 0);
3208 
3209 	free(ctrlr.ana_log_page);
3210 	free(ctrlr.copied_ana_desc);
3211 }
3212 
static void
test_nvme_ctrlr_ana_resize(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();
	uint32_t active_ns_list[] = { 1, 2, 3, 4 };
	struct spdk_nvme_ana_page ana_hdr = {
		.change_count = 0,
		.num_ana_group_desc = 1
	};
	uint8_t ana_desc_buf[sizeof(struct spdk_nvme_ana_group_descriptor) + 4 * sizeof(uint32_t)] = {};
	struct spdk_nvme_ana_group_descriptor *ana_desc =
		(struct spdk_nvme_ana_group_descriptor *)ana_desc_buf;
	struct spdk_nvme_ns *ns;
	union spdk_nvme_async_event_completion aer_event = {
		.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE,
		.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED
	};
	struct spdk_nvme_cpl aer_cpl = {
		.status.sct = SPDK_NVME_SCT_GENERIC,
		.status.sc = SPDK_NVME_SC_SUCCESS,
		.cdw0 = aer_event.raw
	};
	uint32_t i;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_add_process(&ctrlr, NULL) == 0);

	ctrlr.vs.bits.mjr = 1;
	ctrlr.vs.bits.mnr = 4;
	ctrlr.vs.bits.ter = 0;
	ctrlr.cdata.nn = 4096;
	ctrlr.cdata.cmic.ana_reporting = true;
	ctrlr.cdata.nanagrpid = 1;

	ctrlr.state = NVME_CTRLR_STATE_CONFIGURE_AER;
	/* Start with 2 active namespaces */
	g_active_ns_list = active_ns_list;
	g_active_ns_list_length = 2;
	g_ana_hdr = &ana_hdr;
	g_ana_descs = &ana_desc;
	ana_desc->ana_group_id = 1;
	ana_desc->ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ana_desc->num_of_nsid = 2;
	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ana_desc->nsid[i] = i + 1;
	}

	/* Bring controller to ready state */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	}

	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ns = spdk_nvme_ctrlr_get_ns(&ctrlr, i + 1);
		CU_ASSERT(ns->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	}

	/* Add more namespaces */
	g_active_ns_list_length = 4;
	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
	nvme_ctrlr_complete_queued_async_events(&ctrlr);
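	/* The namespace-attribute-changed event causes the active namespace list
	 * to be re-read; the ANA log page buffer is expected to grow to cover the
	 * two new namespaces.
	 */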

	/* Update ANA log with new namespaces */
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->num_of_nsid = 4;
	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ana_desc->nsid[i] = i + 1;
	}
	aer_event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
	aer_cpl.cdw0 = aer_event.raw;
	nvme_ctrlr_async_event_cb(&ctrlr.aer[0], &aer_cpl);
	nvme_ctrlr_complete_queued_async_events(&ctrlr);
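	/* The ANA-change event triggers a re-read of the (now larger) log page,
	 * which should move all four namespaces to the optimized state.
	 */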

	for (i = 0; i < ana_desc->num_of_nsid; ++i) {
		ns = spdk_nvme_ctrlr_get_ns(&ctrlr, i + 1);
		CU_ASSERT(ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	}

	g_active_ns_list = NULL;
	g_active_ns_list_length = 0;
	g_ana_hdr = NULL;
	g_ana_descs = NULL;
	nvme_ctrlr_free_processes(&ctrlr);
	nvme_ctrlr_destruct(&ctrlr);
}

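/* spdk_nvme_ctrlr_get_memory_domains() should simply propagate whatever the
 * mocked transport callout returns.
 */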
static void
test_nvme_ctrlr_get_memory_domains(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	MOCK_SET(nvme_transport_ctrlr_get_memory_domains, 1);
	CU_ASSERT(spdk_nvme_ctrlr_get_memory_domains(&ctrlr, NULL, 0) == 1);

	MOCK_SET(nvme_transport_ctrlr_get_memory_domains, 0);
	CU_ASSERT(spdk_nvme_ctrlr_get_memory_domains(&ctrlr, NULL, 0) == 0);

	MOCK_CLEAR(nvme_transport_ctrlr_get_memory_domains);
}

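/* NVME_CTRLR_STATE_TRANSPORT_READY is the final step of controller
 * initialization; its outcome decides between READY and ERROR.
 */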
static void
test_nvme_transport_ctrlr_ready(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	/* Transport init succeeded */
	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	/* Transport init failed */
	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	MOCK_SET(nvme_transport_ctrlr_ready, -1);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == -1);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
	MOCK_CLEAR(nvme_transport_ctrlr_ready);
}

static void
test_nvme_ctrlr_disable(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();
	int rc;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.state = NVME_CTRLR_STATE_TRANSPORT_READY;
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	/* Start a Controller Level Reset. */
	ctrlr.is_disconnecting = true;
	nvme_ctrlr_disable(&ctrlr);

	g_ut_nvme_regs.cc.bits.en = 0;

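	/* CC.EN is cleared, but CSTS.RDY is still 1, so the first poll has to keep
	 * waiting for the controller to report not-ready.
	 */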
	rc = nvme_ctrlr_disable_poll(&ctrlr);
	CU_ASSERT(rc == -EAGAIN);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

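	/* Once CSTS.RDY drops to 0, the disable sequence completes. */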
	g_ut_nvme_regs.csts.bits.rdy = 0;

	rc = nvme_ctrlr_disable_poll(&ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLED);

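	/* Report shutdown complete so the shutdown performed during
	 * nvme_ctrlr_destruct() can finish immediately.
	 */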
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_ctrlr", NULL, NULL);

	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_1);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_1);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_rr);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_vs);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_delay);
	CU_ADD_TEST(suite, test_alloc_io_qpair_rr_1);
	CU_ADD_TEST(suite, test_ctrlr_get_default_ctrlr_opts);
	CU_ADD_TEST(suite, test_ctrlr_get_default_io_qpair_opts);
	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_1);
	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_2);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_update_firmware);
	CU_ADD_TEST(suite, test_nvme_ctrlr_fail);
	CU_ADD_TEST(suite, test_nvme_ctrlr_construct_intel_support_log_page_list);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_features);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_doorbell_buffer_config);
#if 0 /* TODO: move to PCIe-specific unit test */
	CU_ADD_TEST(suite, test_nvme_ctrlr_alloc_cmb);
#endif
	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns);
	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns_error_case);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_reconnect_io_qpair);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_set_trid);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_nvmf_ioccsz);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_num_queues);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_keep_alive_timeout);
	CU_ADD_TEST(suite, test_alloc_io_qpair_fail);
	CU_ADD_TEST(suite, test_nvme_ctrlr_add_remove_process);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_arbitration_feature);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_state);
	CU_ADD_TEST(suite, test_nvme_ctrlr_active_ns_list_v0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_active_ns_list_v2);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ns_mgmt);
	CU_ADD_TEST(suite, test_nvme_ctrlr_reset);
	CU_ADD_TEST(suite, test_nvme_ctrlr_aer_callback);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ns_attr_changed);
	CU_ADD_TEST(suite, test_nvme_ctrlr_identify_namespaces_iocs_specific_next);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_log_pages);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_intel_supported_log_pages);
	CU_ADD_TEST(suite, test_nvme_ctrlr_parse_ana_log_page);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ana_resize);
	CU_ADD_TEST(suite, test_nvme_ctrlr_get_memory_domains);
	CU_ADD_TEST(suite, test_nvme_transport_ctrlr_ready);
	CU_ADD_TEST(suite, test_nvme_ctrlr_disable);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}