/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk_internal/log.h"

#include "common/lib/test_env.c"

struct spdk_log_flag SPDK_LOG_NVME = {
	.name = "nvme",
	.enabled = false,
};

#include "nvme/nvme_ctrlr.c"
#include "nvme/nvme_quirks.c"
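
/*
 * nvme_ctrlr.c and nvme_quirks.c are compiled directly into this unit test,
 * so every external dependency they pull in (transport callbacks, namespace
 * helpers, qpair plumbing) is satisfied below by a stub or a minimal fake.
 */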

pid_t g_spdk_nvme_pid;

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;

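/*
 * Fake register file: the nvme_transport_ctrlr_{get,set}_reg_* stubs below
 * read and write this structure, letting each test drive CC/CSTS/CAP state
 * transitions without real hardware.
 */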
struct spdk_nvme_registers g_ut_nvme_regs = {};

__thread int    nvme_thread_ioq_index = -1;

uint32_t set_size = 1;

int set_status_cpl = -1;

DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
	    (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_set_id_desc_list_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_free_zns_specific_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_free_iocs_specific_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB(nvme_ns_has_supported_iocs_specific_data, bool, (struct spdk_nvme_ns *ns), false);
DEFINE_STUB_V(nvme_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
		struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_io_msg_ctrlr_update, (struct spdk_nvme_ctrlr *ctrlr));

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	return NULL;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct_finish(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

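/*
 * The four register accessors below back the controller's register accesses
 * with g_ut_nvme_regs; the bounds assertions catch any stray register offset.
 */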
int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return UINT32_MAX;
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	return 1;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	qpair->ctrlr = ctrlr;
	qpair->id = qid;
	qpair->qprio = opts->qprio;

	return qpair;
}

int
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);
	return 0;
}

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
}

int
nvme_driver_init(void)
{
	return 0;
}

int
nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr,
		enum spdk_nvme_qprio qprio,
		uint32_t num_requests)
{
	qpair->id = id;
	qpair->qprio = qprio;
	qpair->ctrlr = ctrlr;

	return 0;
}

static struct spdk_nvme_cpl fake_cpl = {};
static enum spdk_nvme_generic_command_status_code set_status_code = SPDK_NVME_SC_SUCCESS;

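/*
 * Complete an admin command synchronously with the currently configured
 * status code, so no queue processing is required in the tests.
 */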
static void
fake_cpl_sc(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl.status.sc = set_status_code;
	cb_fn(cb_arg, &fake_cpl);
}

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);

	/*
	 * For the purposes of this unit test, we don't need to bother emulating request submission.
	 */

	return 0;
}

static int32_t g_wait_for_completion_return_val;

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return g_wait_for_completion_return_val;
}

void
nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status	*status = arg;
	/* This should not happen in the test env, since this callback is always
	 * called before wait_for_completion_*, and only the wait_for_completion_*
	 * functions can set this field to true. */
	CU_ASSERT(status->timed_out == false);

	status->cpl = *cpl;
	status->done = true;
}

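/*
 * The wait_for_completion fakes below complete immediately. A negative
 * return from spdk_nvme_qpair_process_completions() simulates a timeout:
 * the in-flight status is stashed in g_failed_status so the test can free
 * it manually, since the callback that would normally free it never runs.
 */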
static struct nvme_completion_poll_status *g_failed_status;

int
nvme_wait_for_completion_robust_lock_timeout(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex,
	uint64_t timeout_in_usecs)
{
	if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
		g_failed_status = status;
		status->timed_out = true;
		return -1;
	}

	status->done = true;
	if (set_status_cpl == 1) {
		status->cpl.status.sc = 1;
	}
	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
}

int
nvme_wait_for_completion_robust_lock(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, robust_mutex, 0);
}

int
nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			 struct nvme_completion_poll_status *status)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, 0);
}

int
nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				 struct nvme_completion_poll_status *status,
				 uint64_t timeout_in_usecs)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, timeout_in_usecs);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

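/*
 * Fake IDENTIFY: for the active namespace list CNS, report namespaces
 * (nsid + 1) through ctrlr->num_ns, emulating how a controller pages
 * through the list; every other CNS just completes successfully.
 */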
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
		uint32_t count = 0;
		uint32_t i = 0;
		struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;

		for (i = 1; i <= ctrlr->num_ns; i++) {
			if (i <= nsid) {
				continue;
			}

			ns_list->ns_list[count++] = i;
			if (count == SPDK_COUNTOF(ns_list->ns_list)) {
				break;
			}
		}
	}

	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

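/*
 * Fake firmware commit: fails when firmware slot 0 is requested, and uses
 * set_status_cpl to make the subsequent completion succeed only while the
 * controller is resetting (see test_spdk_nvme_ctrlr_update_firmware).
 */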
int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
	if (fw_commit->fs == 0) {
		return -1;
	}
	set_status_cpl = 1;
	if (ctrlr->is_resetting == true) {
		set_status_cpl = 0;
	}
	return 0;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
		return -1;
	}
	CU_ASSERT(offset == 0);
	return 0;
}

void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_ns_update(struct spdk_nvme_ns *ns)
{
	return 0;
}

void
spdk_pci_device_detach(struct spdk_pci_device *device)
{
}

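/*
 * Give each test a zeroed controller whose admin queue has a single
 * pre-allocated request, which is all the init state machine needs here.
 */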
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;

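/*
 * The init tests below all follow the same pattern: seed CC.EN/CSTS.RDY in
 * the fake register file, then call nvme_ctrlr_process_init() repeatedly and
 * assert each transition of the controller initialization state machine.
 */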
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Default round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = 0x0;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to default round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Weighted round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to weighted round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Vendor specific arbitration mechanism supported
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to vendor specific arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 1
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

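/*
 * Build a controller whose free_io_qids bit array has qids 1..num_io_queues
 * available; qid 0 stays cleared since it is reserved for the admin queue.
 */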
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
	uint32_t i;

	CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);

	ctrlr->page_size = 0x1000;
	ctrlr->opts.num_io_queues = num_io_queues;
	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);

	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
}

static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct(ctrlr);
}

static void
test_alloc_io_qpair_rr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0;

	setup_qpairs(&ctrlr, 1);

	/*
	 * Fake register state to simulate a controller with the default
	 * round robin arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	/* Only 1 I/O qpair was allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpair has been returned to the free list,
	 * we should be able to allocate it again.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/* Only qprio 0 is acceptable with the default round robin arbitration mechanism */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 3;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1;

	setup_qpairs(&ctrlr, 2);

	/*
	 * Fake register state to simulate a controller with the weighted
	 * round robin arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	/*
	 * Allocate 2 qpairs and free them
	 */
	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Allocate 2 qpairs and free them in the reverse order
	 */
	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 2);

	opts.qprio = 3;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_2(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1, *q2, *q3;

	setup_qpairs(&ctrlr, 4);

	/*
	 * Fake register state to simulate a controller with the weighted
	 * round robin arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);

	opts.qprio = 2;
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 2);

	opts.qprio = 3;
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/* Only 4 I/O qpairs were allocated, so this should fail */
	opts.qprio = 0;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpairs have been returned to the free list,
	 * we should be able to allocate them again.
	 *
	 * Allocate 4 I/O qpairs, with two pairs sharing the same qprio.
	 */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 1);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);

	opts.qprio = 3;
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 3);

	opts.qprio = 3;
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/*
	 * Free all I/O qpairs in allocation order
	 */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);

	cleanup_qpairs(&ctrlr);
}

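/*
 * Transport connect stub used by the reconnect test: it records that it was
 * called and returns a configurable status.
 */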
bool g_connect_qpair_called = false;
int g_connect_qpair_return_code = 0;

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	g_connect_qpair_called = true;
	return g_connect_qpair_return_code;
}

static void
test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct spdk_nvme_qpair	qpair = {};
	int rc;

	/* Various states of controller disconnect. */
	qpair.id = 1;
	qpair.ctrlr = &ctrlr;
	ctrlr.is_removed = 1;
	ctrlr.is_failed = 0;
	ctrlr.is_resetting = 0;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENODEV);

	ctrlr.is_removed = 0;
	ctrlr.is_failed = 1;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENXIO);

	ctrlr.is_failed = 0;
	ctrlr.is_resetting = 1;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -EAGAIN);

	/* Confirm precedence for controller states: removed > resetting > failed */
	ctrlr.is_removed = 1;
	ctrlr.is_failed = 1;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENODEV);

	ctrlr.is_removed = 0;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -EAGAIN);

	ctrlr.is_resetting = 0;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENXIO);

	/* The qpair is not failed; make sure we don't call down to the transport */
	ctrlr.is_failed = 0;
	qpair.state = NVME_QPAIR_CONNECTED;
	g_connect_qpair_called = false;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(g_connect_qpair_called == false);
	CU_ASSERT(rc == 0);

	/* The transport qpair is failed; make sure we call down to the transport */
	qpair.state = NVME_QPAIR_DISCONNECTED;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(g_connect_qpair_called == true);
	CU_ASSERT(rc == 0);
}

static void
test_nvme_ctrlr_fail(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};

	ctrlr.opts.num_io_queues = 0;
	nvme_ctrlr_fail(&ctrlr, false);

	CU_ASSERT(ctrlr.is_failed == true);
}

static void
test_nvme_ctrlr_construct_intel_support_log_page_list(void)
{
	bool	res;
	struct spdk_nvme_ctrlr				ctrlr = {};
	struct spdk_nvme_intel_log_page_directory	payload = {};
	struct spdk_pci_id				pci_id = {};

	/* Get quirks for a device with all 0 vendor/device id */
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	CU_ASSERT(ctrlr.quirks == 0);

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == false);

	/* Set the vendor to Intel, but provide no device id */
	pci_id.class_id = SPDK_PCI_CLASS_NVME;
	ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
	payload.temperature_statistics_log_len = 1;
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
	CU_ASSERT(res == false);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
	CU_ASSERT(res == false);

	/* Set a valid vendor id, device id and sub device id */
	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
	payload.temperature_statistics_log_len = 0;
	pci_id.vendor_id = SPDK_PCI_VID_INTEL;
	pci_id.device_id = 0x0953;
	pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
	pci_id.subdevice_id = 0x3702;
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == false);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
	CU_ASSERT(res == false);
}

static void
test_nvme_ctrlr_set_supported_features(void)
{
	bool	res;
	struct spdk_nvme_ctrlr			ctrlr = {};

	/* Set an invalid vendor id */
	ctrlr.cdata.vid = 0xFFFF;
	nvme_ctrlr_set_supported_features(&ctrlr);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
	CU_ASSERT(res == false);

	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
	nvme_ctrlr_set_supported_features(&ctrlr);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
	CU_ASSERT(res == true);
}

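/*
 * spdk_nvme_ctrlr_get_default_ctrlr_opts() only fills fields that fit within
 * the caller-provided opts_size, which is how SPDK keeps the opts structs
 * compatible across versions; the tests below verify that fields beyond a
 * truncated opts_size are left untouched.
 */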
static void
test_ctrlr_get_default_ctrlr_opts(void)
{
	struct spdk_nvme_ctrlr_opts opts = {};

	CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id,
				  "e53e9258-c93b-48b5-be1a-f025af6d232a") == 0);

	memset(&opts, 0, sizeof(opts));

	/* Set a smaller opts_size */
	CU_ASSERT(sizeof(opts) > 8);
	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
	CU_ASSERT_TRUE(opts.use_cmb_sqs);
	/* Check that the fields below are not initialized to default values */
	CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
	CU_ASSERT_EQUAL(opts.io_queue_size, 0);
	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
	for (int i = 0; i < 8; i++) {
		CU_ASSERT(opts.host_id[i] == 0);
	}
	for (int i = 0; i < 16; i++) {
		CU_ASSERT(opts.extended_host_id[i] == 0);
	}
	CU_ASSERT(strlen(opts.hostnqn) == 0);
	CU_ASSERT(strlen(opts.src_addr) == 0);
	CU_ASSERT(strlen(opts.src_svcid) == 0);
	CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);

	/* Set a consistent opts_size */
	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
	CU_ASSERT_TRUE(opts.use_cmb_sqs);
	CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
	for (int i = 0; i < 8; i++) {
		CU_ASSERT(opts.host_id[i] == 0);
	}
	CU_ASSERT_STRING_EQUAL(opts.hostnqn,
			       "2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
	CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
			 sizeof(opts.extended_host_id)) == 0);
	CU_ASSERT(strlen(opts.src_addr) == 0);
	CU_ASSERT(strlen(opts.src_svcid) == 0);
	CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
}

static void
test_ctrlr_get_default_io_qpair_opts(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_io_qpair_opts opts = {};

	memset(&opts, 0, sizeof(opts));

	/* Set a smaller opts_size */
	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
	CU_ASSERT(sizeof(opts) > 8);
	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
	/* Check that the field below is not initialized to a default value */
	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);

	/* Set a consistent opts_size */
	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
	ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
}

#if 0 /* TODO: move to PCIe-specific unit test */
static void
test_nvme_ctrlr_alloc_cmb(void)
{
	int			rc;
	uint64_t		offset;
	struct spdk_nvme_ctrlr	ctrlr = {};

	ctrlr.cmb_size = 0x1000000;
	ctrlr.cmb_current_offset = 0x100;
	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x1000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x2000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x100000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
	CU_ASSERT(rc == -1);
}
#endif

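/*
 * The firmware update test drives spdk_nvme_ctrlr_update_firmware() through
 * its argument validation and the download/commit paths; set_size selects
 * the image size passed in, and set_status_cpl (together with the fw_commit
 * fake above) controls whether the final completion reports success.
 */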
static void
test_spdk_nvme_ctrlr_update_firmware(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	void *payload = NULL;
	int point_payload = 1;
	int slot = 0;
	int ret = 0;
	struct spdk_nvme_status status;
	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;

	/* Pass an invalid size (not a multiple of 4 bytes) */
	set_size = 5;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -1);

	/* When payload is NULL and set_size < min_page_size */
	set_size = 4;
	ctrlr.min_page_size = 5;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -1);

	/* When payload is not NULL but min_page_size is 0 */
	set_size = 4;
	ctrlr.min_page_size = 0;
	payload = &point_payload;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -1);

	/* Check firmware image download when payload is not NULL and min_page_size is not 0; status.cpl value is 1 */
	set_status_cpl = 1;
	set_size = 4;
	ctrlr.min_page_size = 5;
	payload = &point_payload;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -ENXIO);

	/* Check firmware image download with status.cpl value set to 0 */
	set_status_cpl = 0;
	set_size = 4;
	ctrlr.min_page_size = 5;
	payload = &point_payload;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -1);

	/* Check firmware commit */
	ctrlr.is_resetting = false;
	set_status_cpl = 0;
	slot = 1;
	set_size = 4;
	ctrlr.min_page_size = 5;
	payload = &point_payload;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -ENXIO);

	/* Check the full firmware download and commit success path */
	ctrlr.is_resetting = true;
	set_status_cpl = 0;
	slot = 1;
	set_size = 4;
	ctrlr.min_page_size = 5;
	payload = &point_payload;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == 0);

	/* nvme_wait_for_completion returns an error */
	g_wait_for_completion_return_val = -1;
	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
	CU_ASSERT(ret == -ENXIO);
	CU_ASSERT(g_failed_status != NULL);
	CU_ASSERT(g_failed_status->timed_out == true);
	/* The status should be freed by the callback, which is not triggered in the
	   test env. Store the status in a global variable and free it manually.
	   If spdk_nvme_ctrlr_update_firmware changes its behaviour and frees the
	   status itself, we'll get a double free here. */
	free(g_failed_status);
	g_failed_status = NULL;
	g_wait_for_completion_return_val = 0;

	set_status_cpl = 0;
}
1780 
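/* Stub: complete the Doorbell Buffer Config admin command immediately with a
 * faked successful completion. */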
int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

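/*
 * Verify that the doorbell buffer can be set up and torn down cleanly for a
 * PCIe controller that advertises support (oacs.doorbell_buffer_config).
 */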
static void
test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	int ret = -1;

	ctrlr.cdata.oacs.doorbell_buffer_config = 1;
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	ctrlr.page_size = 0x1000;
	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
	CU_ASSERT(ret == 0);
	nvme_ctrlr_free_doorbell_buffer(&ctrlr);
}

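/*
 * Build the active namespace list for NVMe versions 1.0 through 1.2 and
 * verify spdk_nvme_ctrlr_is_active_ns() as well as the first/next active
 * namespace iterators against hand-crafted active_ns_list contents.
 */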
static void
test_nvme_ctrlr_test_active_ns(void)
{
	uint32_t		nsid, minor;
	size_t			ns_id_count;
	struct spdk_nvme_ctrlr	ctrlr = {.state = NVME_CTRLR_STATE_READY};

	ctrlr.page_size = 0x1000;

	for (minor = 0; minor <= 2; minor++) {
		ctrlr.vs.bits.mjr = 1;
		ctrlr.vs.bits.mnr = minor;
		ctrlr.vs.bits.ter = 0;
		ctrlr.num_ns = 1531;
		nvme_ctrlr_identify_active_ns(&ctrlr);

		for (nsid = 1; nsid <= ctrlr.num_ns; nsid++) {
			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
		}
		ctrlr.num_ns = 1559;
		for (; nsid <= ctrlr.num_ns; nsid++) {
			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
		}
		ctrlr.num_ns = 1531;
		for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
			ctrlr.active_ns_list[nsid] = 0;
		}
		CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);

		ctrlr.active_ns_list[0] = 1;
		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
		nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
		CU_ASSERT(nsid == 1);

		ctrlr.active_ns_list[1] = 3;
		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
		nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
		CU_ASSERT(nsid == 3);
		nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
		CU_ASSERT(nsid == 0);

		/* active_ns_list is an array of uint32_t, so size the memset in
		 * elements, not bytes */
		memset(ctrlr.active_ns_list, 0, ctrlr.num_ns * sizeof(uint32_t));
		for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
			ctrlr.active_ns_list[nsid] = nsid + 1;
		}

		ns_id_count = 0;
		for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
		     nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
			ns_id_count++;
		}
		CU_ASSERT(ns_id_count == ctrlr.num_ns);

		nvme_ctrlr_destruct(&ctrlr);
	}
}

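/*
 * If the Identify Active Namespace List command fails, building the active
 * namespace list must fail as well.
 */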
static void
test_nvme_ctrlr_test_active_ns_error_case(void)
{
	int rc;
	struct spdk_nvme_ctrlr	ctrlr = {.state = NVME_CTRLR_STATE_READY};

	ctrlr.page_size = 0x1000;
	ctrlr.vs.bits.mjr = 1;
	ctrlr.vs.bits.mnr = 2;
	ctrlr.vs.bits.ter = 0;
	ctrlr.num_ns = 2;

	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
	rc = nvme_ctrlr_identify_active_ns(&ctrlr);
	CU_ASSERT(rc == -ENXIO);
	set_status_code = SPDK_NVME_SC_SUCCESS;
}

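/*
 * Controllers with the NVME_QUIRK_DELAY_BEFORE_INIT quirk must stay in the
 * INIT state until the initialization delay has fully elapsed, and only then
 * continue through the normal enable sequence.
 */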
static void
test_nvme_ctrlr_init_delay(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	/* Test that the initialization delay works correctly.  We only
	 * do the initialization delay on SSDs that require it, so
	 * set that quirk here.
	 */
	ctrlr.quirks = NVME_QUIRK_DELAY_BEFORE_INIT;
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.state = NVME_CTRLR_STATE_INIT_DELAY;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);

	/* Advance 1s; the sleep time has not elapsed yet, so init just returns */
	spdk_delay_us(1 * spdk_get_ticks_hz());
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);

	/* The sleep timeout has elapsed; initialization proceeds */
	spdk_delay_us(2 * spdk_get_ticks_hz());
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

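/*
 * spdk_nvme_ctrlr_set_trid() may only change the transport ID of a failed
 * controller, and the new trid must match the existing transport type and
 * subsystem NQN; only the address portion is allowed to differ.
 */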
static void
test_spdk_nvme_ctrlr_set_trid(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {0};
	struct spdk_nvme_transport_id	new_trid = {{0}};

	ctrlr.is_failed = false;
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	snprintf(ctrlr.trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(ctrlr.trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(ctrlr.trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EPERM);

	ctrlr.is_failed = true;
	new_trid.trtype = SPDK_NVME_TRANSPORT_TCP;
	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
	CU_ASSERT(ctrlr.trid.trtype == SPDK_NVME_TRANSPORT_RDMA);

	new_trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode2");
	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
	CU_ASSERT(strncmp(ctrlr.trid.subnqn, "nqn.2016-06.io.spdk:cnode1", SPDK_NVMF_NQN_MAX_LEN) == 0);

	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(new_trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(new_trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4421");
	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == 0);
	CU_ASSERT(strncmp(ctrlr.trid.traddr, "192.168.100.9", SPDK_NVMF_TRADDR_MAX_LEN) == 0);
	CU_ASSERT(strncmp(ctrlr.trid.trsvcid, "4421", SPDK_NVMF_TRSVCID_MAX_LEN) == 0);
}

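/*
 * The ioccsz/icdoff values from the NVMe over Fabrics section of Identify
 * Controller only apply to fabrics transports (RDMA, TCP, FC); PCIe and
 * custom transports must leave ioccsz_bytes and icdoff at 0.
 */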
static void
test_nvme_ctrlr_init_set_nvmf_ioccsz(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();
	/* ioccsz is in 16-byte units: 260 * 16 = 4160 bytes, which leaves 4096
	 * bytes of in-capsule data after the 64-byte command */
	ctrlr.cdata.nvmf_specific.ioccsz = 260;
	ctrlr.cdata.nvmf_specific.icdoff = 1;

	/* Check the PCIe trtype */
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;

	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);

	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
	CU_ASSERT(ctrlr.icdoff == 0);

	nvme_ctrlr_destruct(&ctrlr);

	/* Check the RDMA trtype */
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;

	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);

	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
	CU_ASSERT(ctrlr.icdoff == 1);
	ctrlr.ioccsz_bytes = 0;
	ctrlr.icdoff = 0;

	nvme_ctrlr_destruct(&ctrlr);

	/* Check the TCP trtype */
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;

	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);

	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
	CU_ASSERT(ctrlr.icdoff == 1);
	ctrlr.ioccsz_bytes = 0;
	ctrlr.icdoff = 0;

	nvme_ctrlr_destruct(&ctrlr);

	/* Check the FC trtype */
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_FC;

	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);

	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
	CU_ASSERT(ctrlr.icdoff == 1);
	ctrlr.ioccsz_bytes = 0;
	ctrlr.icdoff = 0;

	nvme_ctrlr_destruct(&ctrlr);

	/* Check the CUSTOM trtype */
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;

	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);

	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
	CU_ASSERT(ctrlr.icdoff == 0);

	nvme_ctrlr_destruct(&ctrlr);
}

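/*
 * The Set Features / Number of Queues completion reports the allocated queue
 * counts in cdw0 (NSQA in the lower 16 bits, NCQA in the upper 16 bits); the
 * controller may grant fewer queues than requested, and opts.num_io_queues
 * must be clamped to what was granted.
 */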
static void
test_nvme_ctrlr_init_set_num_queues(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_IDENTIFY_IOCS_SPECIFIC */
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_NUM_QUEUES */
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);

	ctrlr.opts.num_io_queues = 64;
	/* The queue counts are zero-based, so a value of 31 grants 32 queues */
	fake_cpl.cdw0 = 31 + (31 << 16);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> CONSTRUCT_NS */
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
	CU_ASSERT(ctrlr.opts.num_io_queues == 32);
	fake_cpl.cdw0 = 0;

	nvme_ctrlr_destruct(&ctrlr);
}

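/*
 * The Keep Alive Timer granted by the target (Get Features cdw0, in ms) may
 * differ from the requested value; an unsupported feature falls back to the
 * requested timeout, while any other failure aborts initialization.
 */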
static void
test_nvme_ctrlr_init_set_keep_alive_timeout(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	ctrlr.opts.keep_alive_timeout_ms = 60000;
	ctrlr.cdata.kas = 1;
	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
	fake_cpl.cdw0 = 120000;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_HOST_ID */
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_HOST_ID);
	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 120000);
	fake_cpl.cdw0 = 0;

	/* The target does not support the Get Features "Keep Alive Timer" */
	ctrlr.opts.keep_alive_timeout_ms = 60000;
	ctrlr.cdata.kas = 1;
	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_HOST_ID */
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_HOST_ID);
	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 60000);
	set_status_code = SPDK_NVME_SC_SUCCESS;

	/* The target fails the Get Features "Keep Alive Timer" for another reason */
	ctrlr.opts.keep_alive_timeout_ms = 60000;
	ctrlr.cdata.kas = 1;
	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
	set_status_code = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> ERROR */
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
	set_status_code = SPDK_NVME_SC_SUCCESS;

	nvme_ctrlr_destruct(&ctrlr);
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme_ctrlr", NULL, NULL);

	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_1);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_1);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_rr);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_vs);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_delay);
	CU_ADD_TEST(suite, test_alloc_io_qpair_rr_1);
	CU_ADD_TEST(suite, test_ctrlr_get_default_ctrlr_opts);
	CU_ADD_TEST(suite, test_ctrlr_get_default_io_qpair_opts);
	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_1);
	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_2);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_update_firmware);
	CU_ADD_TEST(suite, test_nvme_ctrlr_fail);
	CU_ADD_TEST(suite, test_nvme_ctrlr_construct_intel_support_log_page_list);
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_features);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_doorbell_buffer_config);
#if 0 /* TODO: move to PCIe-specific unit test */
	CU_ADD_TEST(suite, test_nvme_ctrlr_alloc_cmb);
#endif
	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns);
	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns_error_case);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_reconnect_io_qpair);
	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_set_trid);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_nvmf_ioccsz);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_num_queues);
	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_keep_alive_timeout);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}