/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk_internal/log.h"

#include "common/lib/test_env.c"

struct spdk_log_flag SPDK_LOG_NVME = {
	.name = "nvme",
	.enabled = false,
};

#include "nvme/nvme_ctrlr.c"
#include "nvme/nvme_quirks.c"
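
/*
 * nvme_ctrlr.c and nvme_quirks.c are compiled directly into this test, so the
 * stubs below stand in for the transport and admin-command implementations
 * that the controller code would otherwise call.
 */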

pid_t g_spdk_nvme_pid;

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;

struct spdk_nvme_registers g_ut_nvme_regs = {};

__thread int    nvme_thread_ioq_index = -1;

uint32_t set_size = 1;

int set_status_cpl = -1;

DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
	    (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));

struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	return NULL;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct_finish(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

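/*
 * The register accessors below are backed by g_ut_nvme_regs, so tests can
 * drive the init state machine by setting CC and CSTS bits directly.
 */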
int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return UINT32_MAX;
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	return 1;
}

void *
nvme_transport_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
{
	return NULL;
}

int
nvme_transport_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
{
	return 0;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	qpair->ctrlr = ctrlr;
	qpair->id = qid;
	qpair->qprio = opts->qprio;

	return qpair;
}

int
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);
	return 0;
}

int
nvme_transport_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_driver_init(void)
{
	return 0;
}

int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		    struct spdk_nvme_ctrlr *ctrlr,
		    enum spdk_nvme_qprio qprio,
		    uint32_t num_requests)
{
	qpair->id = id;
	qpair->qprio = qprio;
	qpair->ctrlr = ctrlr;

	return 0;
}

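/*
 * Invoke a command callback immediately with a successful completion, so
 * stubbed admin commands complete synchronously and the init state machine
 * can advance without a real admin queue.
 */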
static void
fake_cpl_success(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	cb_fn(cb_arg, &cpl);
}

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);

	/*
	 * For the purposes of this unit test, we don't need to bother emulating request submission.
	 */

	return 0;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return 0;
}

void
nvme_qpair_disable(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_qpair_enable(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	status->cpl = *cpl;
	status->done = true;
}

int
spdk_nvme_wait_for_completion_robust_lock(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex)
{
	status->done = true;
	memset(&status->cpl, 0, sizeof(status->cpl));
	status->cpl.status.sc = 0;
	if (set_status_cpl == 1) {
		status->cpl.status.sc = 1;
	}
	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
}

int
spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			      struct nvme_completion_poll_status *status)
{
	return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}

int
spdk_nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				      struct nvme_completion_poll_status *status,
				      uint64_t timeout_in_secs)
{
	return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

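/*
 * Fake Identify: for the active namespace list CNS, report every namespace
 * from nsid + 1 through num_ns as active, filling at most one page of the
 * list per call.
 */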
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
		uint32_t count = 0;
		uint32_t i = 0;
		struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;

		for (i = 1; i <= ctrlr->num_ns; i++) {
			if (i <= nsid) {
				continue;
			}

			ns_list->ns_list[count++] = i;
			if (count == SPDK_COUNTOF(ns_list->ns_list)) {
				break;
			}
		}
	}
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

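/*
 * Fake firmware commit: slot 0 is treated as invalid, and a successful commit
 * arms set_status_cpl so that the subsequent wait for completion reports an
 * error unless the controller is resetting.
 */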
int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
	if (fw_commit->fs == 0) {
		return -1;
	}
	set_status_cpl = 1;
	if (ctrlr->is_resetting == true) {
		set_status_cpl = 0;
	}
	return 0;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
		return -1;
	}
	CU_ASSERT(offset == 0);
	return 0;
}

int
nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp, uint16_t spsp,
				uint8_t nssf, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
			     uint16_t spsp, uint8_t nssf, void *payload,
			     uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

void
spdk_pci_device_detach(struct spdk_pci_device *device)
{
}

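/*
 * Declare a controller with a fake admin queue. One pre-allocated request is
 * seeded onto adminq.free_req so code paths that submit an admin command
 * (e.g. the async event request sent during initialization) can allocate it.
 */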
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;

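/*
 * The test_nvme_ctrlr_init_* cases below each start from a different
 * CC.EN/CSTS.RDY combination and single-step nvme_ctrlr_process_init()
 * through the expected transitions: an enabled controller must first be
 * disabled (CC.EN = 0, CSTS.RDY = 0) before it can be re-enabled and
 * brought to READY.
 */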
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Only the default round robin arbitration mechanism is supported
	 * (CAP.AMS = 0).
	 */
	g_ut_nvme_regs.cap.bits.ams = 0x0;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to default round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Weighted round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to weighted round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Vendor specific arbitration mechanism supported
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to vendor specific arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 1
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

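/*
 * Construct a controller with num_io_queues free I/O queue IDs. QID 0 is
 * reserved for the admin queue, so its bit is cleared from the free_io_qids
 * array.
 */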
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
	uint32_t i;

	CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);

	ctrlr->page_size = 0x1000;
	ctrlr->opts.num_io_queues = num_io_queues;
	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);

	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
}

static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct(ctrlr);
}

static void
test_alloc_io_qpair_rr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0;

	setup_qpairs(&ctrlr, 1);

	/*
	 * Fake the registers to simulate a controller using the default
	 * round robin arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	/* Only 1 I/O qpair was allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpair has been returned to the free list,
	 * we should be able to allocate it again.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/* Only qprio 0 is acceptable for the default round robin arbitration mechanism */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 3;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}

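/*
 * Under weighted round robin, qprio selects the NVMe priority class:
 * 0 = urgent, 1 = high, 2 = medium, 3 = low. Values above 3 are invalid.
 */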
static void
test_alloc_io_qpair_wrr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1;

	setup_qpairs(&ctrlr, 2);

	/*
	 * Fake the registers to simulate a controller using the weighted
	 * round robin arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	/*
	 * Allocate 2 qpairs and free them
	 */
	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Allocate 2 qpairs and free them in the reverse order
	 */
	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 2);

	opts.qprio = 3;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_2(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1, *q2, *q3;

	setup_qpairs(&ctrlr, 4);

	/*
	 * Fake the registers to simulate a controller using the weighted
	 * round robin arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);

	opts.qprio = 2;
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 2);

	opts.qprio = 3;
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/* Only 4 I/O qpairs were allocated, so this should fail */
	opts.qprio = 0;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpairs have been returned to the free list,
	 * we should be able to allocate them again.
	 *
	 * Allocate 4 I/O qpairs, two pairs sharing the same qprio.
	 */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 1);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);

	opts.qprio = 3;
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 3);

	opts.qprio = 3;
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/*
	 * Free all I/O qpairs
	 */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);

	cleanup_qpairs(&ctrlr);
}

static void
test_nvme_ctrlr_fail(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};

	ctrlr.opts.num_io_queues = 0;
	nvme_ctrlr_fail(&ctrlr, false);

	CU_ASSERT(ctrlr.is_failed == true);
}

static void
test_nvme_ctrlr_construct_intel_support_log_page_list(void)
{
	bool	res;
	struct spdk_nvme_ctrlr				ctrlr = {};
	struct spdk_nvme_intel_log_page_directory	payload = {};
	struct spdk_pci_id				pci_id = {};

	/* Get quirks for a device with all 0 vendor/device id */
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	CU_ASSERT(ctrlr.quirks == 0);

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == false);

	/* Set the vendor to Intel, but provide no device id */
	ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
	payload.temperature_statistics_log_len = 1;
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
	CU_ASSERT(res == false);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
	CU_ASSERT(res == false);

	/* Set a valid vendor id, device id and subdevice id */
	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
	payload.temperature_statistics_log_len = 0;
	pci_id.vendor_id = SPDK_PCI_VID_INTEL;
	pci_id.device_id = 0x0953;
	pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
	pci_id.subdevice_id = 0x3702;
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == false);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
	CU_ASSERT(res == false);
}

static void
test_nvme_ctrlr_set_supported_features(void)
{
	bool	res;
	struct spdk_nvme_ctrlr			ctrlr = {};

	/* Set an invalid vendor id */
	ctrlr.cdata.vid = 0xFFFF;
	nvme_ctrlr_set_supported_features(&ctrlr);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
	CU_ASSERT(res == false);

	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
	nvme_ctrlr_set_supported_features(&ctrlr);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
	CU_ASSERT(res == true);
}

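/*
 * spdk_nvme_ctrlr_get_default_ctrlr_opts() only fills the fields that fit
 * within the caller-provided opts_size, which is how callers built against an
 * older, smaller options struct remain ABI compatible.
 */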
static void
test_ctrlr_get_default_ctrlr_opts(void)
{
	struct spdk_nvme_ctrlr_opts opts = {};

	CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id,
				  "e53e9258-c93b-48b5-be1a-f025af6d232a") == 0);

	memset(&opts, 0, sizeof(opts));

	/* set a smaller opts_size */
	CU_ASSERT(sizeof(opts) > 8);
	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
	CU_ASSERT_TRUE(opts.use_cmb_sqs);
	/* check that the fields below are not initialized to default values */
	CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
	CU_ASSERT_EQUAL(opts.io_queue_size, 0);
	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
	for (int i = 0; i < 8; i++) {
		CU_ASSERT(opts.host_id[i] == 0);
	}
	for (int i = 0; i < 16; i++) {
		CU_ASSERT(opts.extended_host_id[i] == 0);
	}
	CU_ASSERT(strlen(opts.hostnqn) == 0);
	CU_ASSERT(strlen(opts.src_addr) == 0);
	CU_ASSERT(strlen(opts.src_svcid) == 0);
	CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);

	/* set a consistent opts_size */
	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
	CU_ASSERT_TRUE(opts.use_cmb_sqs);
	CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
	for (int i = 0; i < 8; i++) {
		CU_ASSERT(opts.host_id[i] == 0);
	}
	CU_ASSERT_STRING_EQUAL(opts.hostnqn,
			       "2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
	CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
			 sizeof(opts.extended_host_id)) == 0);
	CU_ASSERT(strlen(opts.src_addr) == 0);
	CU_ASSERT(strlen(opts.src_svcid) == 0);
	CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
}

static void
test_ctrlr_get_default_io_qpair_opts(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_io_qpair_opts opts = {};

	memset(&opts, 0, sizeof(opts));

	/* set a smaller opts_size */
	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
	CU_ASSERT(sizeof(opts) > 8);
	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
	/* check that the field below is not initialized to a default value */
	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);

	/* set a consistent opts_size */
	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
	ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
}

#if 0 /* TODO: move to PCIe-specific unit test */
static void
test_nvme_ctrlr_alloc_cmb(void)
{
	int			rc;
	uint64_t		offset;
	struct spdk_nvme_ctrlr	ctrlr = {};

	ctrlr.cmb_size = 0x1000000;
	ctrlr.cmb_current_offset = 0x100;
	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x1000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x2000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x100000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
	CU_ASSERT(rc == -1);
}
#endif

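/*
 * The cases below drive the stubbed download/commit paths through the
 * set_size and set_status_cpl globals: set_size supplies the image size
 * (a size that is not a multiple of 4 fails the alignment check), and
 * set_status_cpl forces the stubbed completion to report an error.
 */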
static void
test_spdk_nvme_ctrlr_update_firmware(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	void *payload = NULL;
	int point_payload = 1;
	int slot = 0;
	int ret = 0;
	struct spdk_nvme_status status;
	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;

	/* An image size that is not dword aligned should fail the size check */
	set_size = 5;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -1);

	/* When payload is NULL and set_size < min_page_size */
	set_size = 4;
	ctrlr.min_page_size = 5;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -1);

	/* When payload is not NULL but min_page_size is 0 */
	set_size = 4;
	ctrlr.min_page_size = 0;
	payload = &point_payload;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -1);

	/*
	 * Check firmware image download when payload is not NULL and
	 * min_page_size is not 0, with the completion reporting an error.
	 */
	set_status_cpl = 1;
	set_size = 4;
	ctrlr.min_page_size = 5;
	payload = &point_payload;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -ENXIO);

	/* Check firmware image download with a successful completion status (commit to slot 0 still fails) */
	set_status_cpl = 0;
	set_size = 4;
	ctrlr.min_page_size = 5;
	payload = &point_payload;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -1);

	/* Check firmware commit */
	ctrlr.is_resetting = false;
	set_status_cpl = 0;
	slot = 1;
	set_size = 4;
	ctrlr.min_page_size = 5;
	payload = &point_payload;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -ENXIO);

	/* Check firmware download and commit while the controller is resetting */
	ctrlr.is_resetting = true;
	set_status_cpl = 0;
	slot = 1;
	set_size = 4;
	ctrlr.min_page_size = 5;
	payload = &point_payload;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == 0);

	set_status_cpl = 0;
}

int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

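/*
 * MOCK_CLEAR reverts the spdk_*malloc wrappers to real allocations so the
 * doorbell buffer can actually be allocated and then freed.
 */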
static void
test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	int ret = -1;

	ctrlr.cdata.oacs.doorbell_buffer_config = 1;
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	ctrlr.page_size = 0x1000;
	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	MOCK_CLEAR(spdk_dma_malloc);
	MOCK_CLEAR(spdk_dma_zmalloc);
	ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
	CU_ASSERT(ret == 0);
	nvme_ctrlr_free_doorbell_buffer(&ctrlr);
}

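/*
 * An Identify Active Namespace List page holds 1024 entries, so num_ns values
 * above 1024 (1531 here) force nvme_ctrlr_identify_active_ns() to fetch more
 * than one page from the stubbed nvme_ctrlr_cmd_identify().
 */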
static void
test_nvme_ctrlr_test_active_ns(void)
{
	uint32_t		nsid, minor;
	size_t			ns_id_count;
	struct spdk_nvme_ctrlr	ctrlr = {};

	ctrlr.page_size = 0x1000;

	for (minor = 0; minor <= 2; minor++) {
		ctrlr.cdata.ver.bits.mjr = 1;
		ctrlr.cdata.ver.bits.mnr = minor;
		ctrlr.cdata.ver.bits.ter = 0;
		ctrlr.num_ns = 1531;
		nvme_ctrlr_identify_active_ns(&ctrlr);

		for (nsid = 1; nsid <= ctrlr.num_ns; nsid++) {
			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
		}
		ctrlr.num_ns = 1559;
		for (; nsid <= ctrlr.num_ns; nsid++) {
			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
		}
		ctrlr.num_ns = 1531;
		for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
			ctrlr.active_ns_list[nsid] = 0;
		}
		CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);

		ctrlr.active_ns_list[0] = 1;
		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
		nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
		CU_ASSERT(nsid == 1);

		ctrlr.active_ns_list[1] = 3;
		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
		nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
		CU_ASSERT(nsid == 3);
		nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
		CU_ASSERT(nsid == 0);

		/* Zero the full uint32_t list, not just num_ns bytes */
		memset(ctrlr.active_ns_list, 0, ctrlr.num_ns * sizeof(uint32_t));
		for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
			ctrlr.active_ns_list[nsid] = nsid + 1;
		}

		ns_id_count = 0;
		for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
		     nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
			ns_id_count++;
		}
		CU_ASSERT(ns_id_count == ctrlr.num_ns);

		nvme_ctrlr_destruct(&ctrlr);
	}
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvme_ctrlr", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "test nvme_ctrlr init CC.EN = 1 CSTS.RDY = 0",
			    test_nvme_ctrlr_init_en_1_rdy_0) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 1 CSTS.RDY = 1",
			       test_nvme_ctrlr_init_en_1_rdy_1) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0",
			       test_nvme_ctrlr_init_en_0_rdy_0) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 1",
			       test_nvme_ctrlr_init_en_0_rdy_1) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = RR",
			       test_nvme_ctrlr_init_en_0_rdy_0_ams_rr) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = WRR",
			       test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = VS",
			       test_nvme_ctrlr_init_en_0_rdy_0_ams_vs) == NULL
		|| CU_add_test(suite, "alloc_io_qpair_rr 1", test_alloc_io_qpair_rr_1) == NULL
		|| CU_add_test(suite, "get_default_ctrlr_opts", test_ctrlr_get_default_ctrlr_opts) == NULL
		|| CU_add_test(suite, "get_default_io_qpair_opts", test_ctrlr_get_default_io_qpair_opts) == NULL
		|| CU_add_test(suite, "alloc_io_qpair_wrr 1", test_alloc_io_qpair_wrr_1) == NULL
		|| CU_add_test(suite, "alloc_io_qpair_wrr 2", test_alloc_io_qpair_wrr_2) == NULL
		|| CU_add_test(suite, "test nvme ctrlr function update_firmware",
			       test_spdk_nvme_ctrlr_update_firmware) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr function nvme_ctrlr_fail", test_nvme_ctrlr_fail) == NULL
		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_construct_intel_support_log_page_list",
			       test_nvme_ctrlr_construct_intel_support_log_page_list) == NULL
		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_set_supported_features",
			       test_nvme_ctrlr_set_supported_features) == NULL
		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_set_doorbell_buffer_config",
			       test_spdk_nvme_ctrlr_doorbell_buffer_config) == NULL
#if 0 /* TODO: move to PCIe-specific unit test */
		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_alloc_cmb",
			       test_nvme_ctrlr_alloc_cmb) == NULL
#endif
		|| CU_add_test(suite, "test nvme ctrlr function test_nvme_ctrlr_test_active_ns",
			       test_nvme_ctrlr_test_active_ns) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}