/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk/log.h"

#include "common/lib/test_env.c"

#include "nvme/nvme_ctrlr.c"
#include "nvme/nvme_quirks.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

pid_t g_spdk_nvme_pid;

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;

struct spdk_nvme_registers g_ut_nvme_regs = {};

__thread int    nvme_thread_ioq_index = -1;

uint32_t set_size = 1;

int set_status_cpl = -1;

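/*
 * Stubs for symbols that nvme_ctrlr.c references but these tests never
 * exercise directly. DEFINE_STUB(fn, ret, params, val) and DEFINE_STUB_V()
 * come from the mocking helpers pulled in through common/lib/test_env.c
 * above. As a rough sketch (not the exact macro expansion), DEFINE_STUB
 * generates a function that ignores its arguments and returns the given
 * value, e.g.:
 *
 *   int nvme_io_msg_process(struct spdk_nvme_ctrlr *ctrlr) { return 0; }
 */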
DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
	    (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_set_id_desc_list_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_free_zns_specific_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB_V(nvme_ns_free_iocs_specific_data, (struct spdk_nvme_ns *ns));
DEFINE_STUB(nvme_ns_has_supported_iocs_specific_data, bool, (struct spdk_nvme_ns *ns), false);
DEFINE_STUB_V(nvme_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
		struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_io_msg_ctrlr_update, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB(nvme_io_msg_process, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(nvme_transport_ctrlr_reserve_cmb, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_receive, int, (struct spdk_nvme_ctrlr *ctrlr,
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_send, int, (struct spdk_nvme_ctrlr *ctrlr,
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	return NULL;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct_finish(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

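/*
 * The register accessors below redirect all reads and writes to the global
 * g_ut_nvme_regs, so each test can drive and inspect controller registers
 * (CC, CSTS, CAP, ...) as plain struct fields.
 */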
int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return UINT32_MAX;
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	return 1;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	qpair->ctrlr = ctrlr;
	qpair->id = qid;
	qpair->qprio = opts->qprio;

	return qpair;
}

int
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);
	return 0;
}

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
}

int
nvme_driver_init(void)
{
	return 0;
}

int
nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr,
		enum spdk_nvme_qprio qprio,
		uint32_t num_requests)
{
	qpair->id = id;
	qpair->qprio = qprio;
	qpair->ctrlr = ctrlr;

	return 0;
}

static struct spdk_nvme_cpl fake_cpl = {};
static enum spdk_nvme_generic_command_status_code set_status_code = SPDK_NVME_SC_SUCCESS;

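/* Complete an admin command immediately with the globally configured status
 * code, standing in for a real controller completion. */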
static void
fake_cpl_sc(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl.status.sc = set_status_code;
	cb_fn(cb_arg, &fake_cpl);
}

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10, uint32_t cdw11,
				     uint32_t cdw14, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);

	/*
	 * For the purposes of this unit test, we don't need to bother emulating request submission.
	 */

	return 0;
}

static int32_t g_wait_for_completion_return_val;

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return g_wait_for_completion_return_val;
}

void
nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/* This should not happen in the test env, since this callback is always
	 * called before wait_for_completion_*, and this field can only be set to
	 * true in the wait_for_completion_* functions. */
	CU_ASSERT(status->timed_out == false);

	status->cpl = *cpl;
	status->done = true;
}

static struct nvme_completion_poll_status *g_failed_status;

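/*
 * Test shim for the completion-wait helpers: commands always "complete"
 * immediately. A negative return from the stubbed process_completions call
 * simulates a transport failure and records the status in g_failed_status,
 * while set_status_cpl == 1 forces an error status code.
 */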
int
nvme_wait_for_completion_robust_lock_timeout(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex,
	uint64_t timeout_in_usecs)
{
	if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
		g_failed_status = status;
		status->timed_out = true;
		return -1;
	}

	status->done = true;
	if (set_status_cpl == 1) {
		status->cpl.status.sc = 1;
	}
	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
}

int
nvme_wait_for_completion_robust_lock(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, robust_mutex, 0);
}

int
nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			 struct nvme_completion_poll_status *status)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, 0);
}

int
nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				 struct nvme_completion_poll_status *status,
				 uint64_t timeout_in_usecs)
{
	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, timeout_in_usecs);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

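/*
 * Identify stub. For SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST it fabricates a dense
 * active namespace list (nsid + 1 through num_ns), i.e. it behaves like a
 * controller on which every namespace is active.
 */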
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
		uint32_t count = 0;
		uint32_t i = 0;
		struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;

		for (i = 1; i <= ctrlr->num_ns; i++) {
			if (i <= nsid) {
				continue;
			}

			ns_list->ns_list[count++] = i;
			if (count == SPDK_COUNTOF(ns_list->ns_list)) {
				break;
			}
		}
	}

	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				   uint32_t doper, uint32_t dtype, uint32_t dspec,
				   void *payload, uint32_t payload_size, uint32_t cdw12,
				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				      uint32_t doper, uint32_t dtype, uint32_t dspec,
				      void *payload, uint32_t payload_size, uint32_t cdw12,
				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

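/* Firmware commit stub: reject firmware slot (fs) 0, and otherwise arm an
 * error completion via set_status_cpl unless the controller is resetting. */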
int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
	if (fw_commit->fs == 0) {
		return -1;
	}
	set_status_cpl = 1;
	if (ctrlr->is_resetting == true) {
		set_status_cpl = 0;
	}
	return 0;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
		return -1;
	}
	CU_ASSERT(offset == 0);
	return 0;
}

void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_ns_update(struct spdk_nvme_ns *ns)
{
	return 0;
}

void
spdk_pci_device_detach(struct spdk_pci_device *device)
{
}

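/*
 * Stack-allocate a ctrlr whose admin queue holds a single pre-allocated
 * request, which is all the init state machine needs in these tests.
 */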
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;

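/*
 * The test_nvme_ctrlr_init_en_*_rdy_* cases below walk
 * nvme_ctrlr_process_init() through the controller enable sequence, one
 * starting (CC.EN, CSTS.RDY) combination per test: a controller that is
 * already enabled must first be disabled (CC.EN = 0, then wait for
 * CSTS.RDY = 0) before it is re-enabled and brought to READY.
 */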
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Default round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = 0x0;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to default round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Weighted round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to weighted round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Vendor specific arbitration mechanism supported
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to vendor specific arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 1
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

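/*
 * Fake a constructed controller with num_io_queues free I/O queue IDs.
 * Bit 0 is cleared in the free_io_qids array since QID 0 is reserved for
 * the admin queue.
 */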
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
	uint32_t i;

	CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);

	ctrlr->page_size = 0x1000;
	ctrlr->opts.num_io_queues = num_io_queues;
	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);

	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
}

static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct(ctrlr);
}

static void
test_alloc_io_qpair_rr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0;

	setup_qpairs(&ctrlr, 1);

	/*
	 * Fake the registers to simulate a controller using the default
	 * round robin arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	/* Only 1 I/O qpair was allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpair has been returned to the free list,
	 * we should be able to allocate it again.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/* Only qprio 0 is acceptable for the default round robin arbitration mechanism */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 3;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1;

	setup_qpairs(&ctrlr, 2);

	/*
	 * Fake the registers to simulate a controller using the weighted
	 * round robin arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	/*
	 * Allocate 2 qpairs and free them
	 */
	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Allocate 2 qpairs and free them in the reverse order
	 */
	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 2);

	opts.qprio = 3;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);

	/* Only qprio values 0 through 3 are acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_2(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1, *q2, *q3;

	setup_qpairs(&ctrlr, 4);

	/*
	 * Fake the registers to simulate a controller using the weighted
	 * round robin arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);

	opts.qprio = 2;
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 2);

	opts.qprio = 3;
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/* Only 4 I/O qpairs were allocated, so allocating a fifth should fail */
	opts.qprio = 0;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpairs have been returned to the free list,
	 * we should be able to allocate them again.
	 *
	 * Allocate 4 I/O qpairs, two per qprio value.
	 */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 1);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);

	opts.qprio = 3;
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 3);

	opts.qprio = 3;
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/*
	 * Free all I/O qpairs
	 */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);

	cleanup_qpairs(&ctrlr);
}

bool g_connect_qpair_called = false;
int g_connect_qpair_return_code = 0;

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	g_connect_qpair_called = true;
	return g_connect_qpair_return_code;
}

static void
test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct spdk_nvme_qpair	qpair = {};
	int rc;

	/* Various states of controller disconnect. */
	qpair.id = 1;
	qpair.ctrlr = &ctrlr;
	ctrlr.is_removed = 1;
	ctrlr.is_failed = 0;
	ctrlr.is_resetting = 0;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENODEV);

	ctrlr.is_removed = 0;
	ctrlr.is_failed = 1;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENXIO);

	ctrlr.is_failed = 0;
	ctrlr.is_resetting = 1;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -EAGAIN);

	/* Confirm precedence for controller states: removed > resetting > failed */
	ctrlr.is_removed = 1;
	ctrlr.is_failed = 1;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENODEV);

	ctrlr.is_removed = 0;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -EAGAIN);

	ctrlr.is_resetting = 0;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(rc == -ENXIO);

	/* The qpair is not failed, so make sure we don't call down to the transport */
	ctrlr.is_failed = 0;
	qpair.state = NVME_QPAIR_CONNECTED;
	g_connect_qpair_called = false;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(g_connect_qpair_called == false);
	CU_ASSERT(rc == 0);

	/* The transport qpair is failed, so make sure we call down to the transport */
	qpair.state = NVME_QPAIR_DISCONNECTED;
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
	CU_ASSERT(g_connect_qpair_called == true);
	CU_ASSERT(rc == 0);
}

static void
test_nvme_ctrlr_fail(void)
{
	struct spdk_nvme_ctrlr	ctrlr = {};

	ctrlr.opts.num_io_queues = 0;
	nvme_ctrlr_fail(&ctrlr, false);

	CU_ASSERT(ctrlr.is_failed == true);
}

static void
test_nvme_ctrlr_construct_intel_support_log_page_list(void)
{
	bool	res;
	struct spdk_nvme_ctrlr				ctrlr = {};
	struct spdk_nvme_intel_log_page_directory	payload = {};
	struct spdk_pci_id				pci_id = {};

	/* Get quirks for a device with all-zero vendor/device IDs */
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	CU_ASSERT(ctrlr.quirks == 0);

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == false);

	/* Set the vendor to Intel, but provide no device ID */
	pci_id.class_id = SPDK_PCI_CLASS_NVME;
	ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
	payload.temperature_statistics_log_len = 1;
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
	CU_ASSERT(res == false);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
	CU_ASSERT(res == false);

	/* Set a valid vendor ID, device ID, and subdevice ID */
	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
	payload.temperature_statistics_log_len = 0;
	pci_id.vendor_id = SPDK_PCI_VID_INTEL;
	pci_id.device_id = 0x0953;
	pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
	pci_id.subdevice_id = 0x3702;
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == false);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
	CU_ASSERT(res == false);
}

static void
test_nvme_ctrlr_set_supported_features(void)
{
	bool	res;
	struct spdk_nvme_ctrlr			ctrlr = {};

	/* Set an invalid vendor ID */
	ctrlr.cdata.vid = 0xFFFF;
	nvme_ctrlr_set_supported_features(&ctrlr);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
	CU_ASSERT(res == false);

	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
	nvme_ctrlr_set_supported_features(&ctrlr);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
	CU_ASSERT(res == true);
}

static void
test_ctrlr_get_default_ctrlr_opts(void)
{
	struct spdk_nvme_ctrlr_opts opts = {};

	CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id,
				  "e53e9258-c93b-48b5-be1a-f025af6d232a") == 0);

	memset(&opts, 0, sizeof(opts));

	/* Set a smaller opts_size */
	CU_ASSERT(sizeof(opts) > 8);
	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
	CU_ASSERT_TRUE(opts.use_cmb_sqs);
	/* Check that the fields below are not initialized to their default values */
	CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
	CU_ASSERT_EQUAL(opts.io_queue_size, 0);
	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
	for (int i = 0; i < 8; i++) {
		CU_ASSERT(opts.host_id[i] == 0);
	}
	for (int i = 0; i < 16; i++) {
		CU_ASSERT(opts.extended_host_id[i] == 0);
	}
	CU_ASSERT(strlen(opts.hostnqn) == 0);
	CU_ASSERT(strlen(opts.src_addr) == 0);
	CU_ASSERT(strlen(opts.src_svcid) == 0);
	CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);

	/* Set an opts_size that matches sizeof(opts) */
	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
	CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
	CU_ASSERT_TRUE(opts.use_cmb_sqs);
	CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
	CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
	for (int i = 0; i < 8; i++) {
		CU_ASSERT(opts.host_id[i] == 0);
	}
	CU_ASSERT_STRING_EQUAL(opts.hostnqn,
			       "nqn.2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
	CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
			 sizeof(opts.extended_host_id)) == 0);
	CU_ASSERT(strlen(opts.src_addr) == 0);
	CU_ASSERT(strlen(opts.src_svcid) == 0);
	CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
}

1676 static void
1677 test_ctrlr_get_default_io_qpair_opts(void)
1678 {
1679 	struct spdk_nvme_ctrlr ctrlr = {};
1680 	struct spdk_nvme_io_qpair_opts opts = {};
1681 
1682 	memset(&opts, 0, sizeof(opts));
1683 
1684 	/* set a smaller opts_size */
1685 	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
1686 	CU_ASSERT(sizeof(opts) > 8);
1687 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
1688 	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
1689 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
1690 	/* check below field is not initialized by default value */
1691 	CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
1692 
1693 	/* Pass the full opts_size: every field receives its default value. */
1694 	ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
1695 	ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
1696 	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
1697 	CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
1698 	CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
1699 	CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
1700 }
1701 
1702 #if 0 /* TODO: move to PCIe-specific unit test */
1703 static void
1704 test_nvme_ctrlr_alloc_cmb(void)
1705 {
1706 	int			rc;
1707 	uint64_t		offset;
1708 	struct spdk_nvme_ctrlr	ctrlr = {};
1709 
1710 	ctrlr.cmb_size = 0x1000000;
1711 	ctrlr.cmb_current_offset = 0x100;
1712 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
1713 	CU_ASSERT(rc == 0);
1714 	CU_ASSERT(offset == 0x1000);
1715 	CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);
1716 
1717 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
1718 	CU_ASSERT(rc == 0);
1719 	CU_ASSERT(offset == 0x2000);
1720 	CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);
1721 
1722 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
1723 	CU_ASSERT(rc == 0);
1724 	CU_ASSERT(offset == 0x100000);
1725 	CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);
1726 
1727 	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
1728 	CU_ASSERT(rc == -1);
1729 }
1730 #endif
1731 
1732 static void
1733 test_spdk_nvme_ctrlr_update_firmware(void)
1734 {
1735 	struct spdk_nvme_ctrlr ctrlr = {};
1736 	void *payload = NULL;
1737 	int point_payload = 1;
1738 	int slot = 0;
1739 	int ret = 0;
1740 	struct spdk_nvme_status status;
1741 	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;
1742 
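	/* spdk_nvme_ctrlr_update_firmware() downloads the image in chunks of at
	 * most min_page_size bytes, then issues a Firmware Commit and resets the
	 * controller. Each case below forces a failure at a different step.
	 */
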
1743 	/* A size that is not a multiple of 4 bytes fails the size check. */
1744 	set_size = 5;
1745 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
1746 	CU_ASSERT(ret == -1);
1747 
1748 	/* Payload is NULL while size (4) is less than min_page_size (5). */
1749 	set_size = 4;
1750 	ctrlr.min_page_size = 5;
1751 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
1752 	CU_ASSERT(ret == -1);
1753 
1754 	/* Payload is not NULL, but min_page_size is 0. */
1755 	set_size = 4;
1756 	ctrlr.min_page_size = 0;
1757 	payload = &point_payload;
1758 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
1759 	CU_ASSERT(ret == -1);
1760 
1761 	/* Firmware image download with a valid payload and min_page_size; status.cpl is forced to 1, so the download fails. */
1762 	set_status_cpl = 1;
1763 	set_size = 4;
1764 	ctrlr.min_page_size = 5;
1765 	payload = &point_payload;
1766 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
1767 	CU_ASSERT(ret == -ENXIO);
1768 
1769 	/* Firmware image download with status.cpl set to 0 (success), but the commit to slot 0 fails. */
1770 	set_status_cpl = 0;
1771 	set_size = 4;
1772 	ctrlr.min_page_size = 5;
1773 	payload = &point_payload;
1774 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
1775 	CU_ASSERT(ret == -1);
1776 
1777 	/* Check the firmware commit step: committing to slot 1 completes with an error status. */
1778 	ctrlr.is_resetting = false;
1779 	set_status_cpl = 0;
1780 	slot = 1;
1781 	set_size = 4;
1782 	ctrlr.min_page_size = 5;
1783 	payload = &point_payload;
1784 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
1785 	CU_ASSERT(ret == -ENXIO);
1786 
1787 	/* With the controller marked as resetting, firmware download and commit both succeed. */
1788 	ctrlr.is_resetting = true;
1789 	set_status_cpl = 0;
1790 	slot = 1;
1791 	set_size = 4;
1792 	ctrlr.min_page_size = 5;
1793 	payload = &point_payload;
1794 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
1795 	CU_ASSERT(ret == 0);
1796 
1797 	/* nvme_wait_for_completion returns an error */
1798 	g_wait_for_completion_return_val = -1;
1799 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
1800 	CU_ASSERT(ret == -ENXIO);
1801 	CU_ASSERT(g_failed_status != NULL);
1802 	CU_ASSERT(g_failed_status->timed_out == true);
1803 	/* The status should be freed by the callback, which is not triggered in the
1804 	   test environment. Store the status in a global variable and free it manually.
1805 	   If spdk_nvme_ctrlr_update_firmware ever changes its behavior and frees the
1806 	   status itself, this will turn into a double free. */
1807 	free(g_failed_status);
1808 	g_failed_status = NULL;
1809 	g_wait_for_completion_return_val = 0;
1810 
1811 	set_status_cpl = 0;
1812 }
1813 
1814 int
1815 nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
1816 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1817 {
1818 	fake_cpl_sc(cb_fn, cb_arg);
1819 	return 0;
1820 }
1821 
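/* Doorbell Buffer Config is an NVMe feature aimed at paravirtualized
 * controllers: the host allocates a page-sized shadow doorbell buffer plus a
 * page-sized EventIdx buffer and registers them with the controller. The
 * driver only attempts this when cdata.oacs.doorbell_buffer_config is set and
 * the transport is PCIe, which is what the test below sets up.
 */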
1822 static void
1823 test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
1824 {
1825 	struct spdk_nvme_ctrlr ctrlr = {};
1826 	int ret = -1;
1827 
1828 	ctrlr.cdata.oacs.doorbell_buffer_config = 1;
1829 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
1830 	ctrlr.page_size = 0x1000;
1831 	MOCK_CLEAR(spdk_malloc);
1832 	MOCK_CLEAR(spdk_zmalloc);
1833 	ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
1834 	CU_ASSERT(ret == 0);
1835 	nvme_ctrlr_free_doorbell_buffer(&ctrlr);
1836 }
1837 
1838 static void
1839 test_nvme_ctrlr_test_active_ns(void)
1840 {
1841 	uint32_t		nsid, minor;
1842 	size_t			ns_id_count;
1843 	struct spdk_nvme_ctrlr	ctrlr = {.state = NVME_CTRLR_STATE_READY};
1844 
1845 	ctrlr.page_size = 0x1000;
1846 
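	/* Loop over NVMe versions 1.0, 1.1 and 1.2. Versions before 1.1 lack the
	 * Identify Active Namespace ID List (CNS 0x02), so this exercises both
	 * code paths in nvme_ctrlr_identify_active_ns().
	 */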
1847 	for (minor = 0; minor <= 2; minor++) {
1848 		ctrlr.vs.bits.mjr = 1;
1849 		ctrlr.vs.bits.mnr = minor;
1850 		ctrlr.vs.bits.ter = 0;
1851 		ctrlr.num_ns = 1531;
1852 		nvme_ctrlr_identify_active_ns(&ctrlr);
1853 
1854 		for (nsid = 1; nsid <= ctrlr.num_ns; nsid++) {
1855 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
1856 		}
1857 		ctrlr.num_ns = 1559;
1858 		for (; nsid <= ctrlr.num_ns; nsid++) {
1859 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
1860 		}
1861 		ctrlr.num_ns = 1531;
1862 		for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
1863 			ctrlr.active_ns_list[nsid] = 0;
1864 		}
1865 		CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
1866 
1867 		ctrlr.active_ns_list[0] = 1;
1868 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
1869 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
1870 		nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
1871 		CU_ASSERT(nsid == 1);
1872 
1873 		ctrlr.active_ns_list[1] = 3;
1874 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
1875 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
1876 		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
1877 		nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
1878 		CU_ASSERT(nsid == 3);
1879 		nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
1880 		CU_ASSERT(nsid == 0);
1881 
1882 		memset(ctrlr.active_ns_list, 0, ctrlr.num_ns * sizeof(uint32_t));
1883 		for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
1884 			ctrlr.active_ns_list[nsid] = nsid + 1;
1885 		}
1886 
1887 		ns_id_count = 0;
1888 		for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
1889 		     nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
1890 			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
1891 			ns_id_count++;
1892 		}
1893 		CU_ASSERT(ns_id_count == ctrlr.num_ns);
1894 
1895 		nvme_ctrlr_destruct(&ctrlr);
1896 	}
1897 }
1898 
1899 static void
1900 test_nvme_ctrlr_test_active_ns_error_case(void)
1901 {
1902 	int rc;
1903 	struct spdk_nvme_ctrlr	ctrlr = {.state = NVME_CTRLR_STATE_READY};
1904 
1905 	ctrlr.page_size = 0x1000;
1906 	ctrlr.vs.bits.mjr = 1;
1907 	ctrlr.vs.bits.mnr = 2;
1908 	ctrlr.vs.bits.ter = 0;
1909 	ctrlr.num_ns = 2;
1910 
1911 	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
1912 	rc = nvme_ctrlr_identify_active_ns(&ctrlr);
1913 	CU_ASSERT(rc == -ENXIO);
1914 	set_status_code = SPDK_NVME_SC_SUCCESS;
1915 }
1916 
1917 static void
1918 test_nvme_ctrlr_init_delay(void)
1919 {
1920 	DECLARE_AND_CONSTRUCT_CTRLR();
1921 
1922 	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
1923 
1924 	/*
1925 	 * Initial state: CC.EN = 0, CSTS.RDY = 0
1926 	 * init() should set CC.EN = 1.
1927 	 */
1928 	g_ut_nvme_regs.cc.bits.en = 0;
1929 	g_ut_nvme_regs.csts.bits.rdy = 0;
1930 
1931 	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
1932 	/* Test that the initialization delay works correctly.  We only
1933 	 * do the initialization delay on SSDs that require it, so
1934 	 * set that quirk here.
1935 	 */
1936 	ctrlr.quirks = NVME_QUIRK_DELAY_BEFORE_INIT;
1937 	ctrlr.cdata.nn = 1;
1938 	ctrlr.page_size = 0x1000;
1939 	ctrlr.state = NVME_CTRLR_STATE_INIT_DELAY;
1940 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1941 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
1942 	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
1943 
1944 	/* Delay 1s; process_init just returns because the sleep timeout has not elapsed yet. */
1945 	spdk_delay_us(1 * spdk_get_ticks_hz());
1946 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1947 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
1948 	CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
1949 
1950 	/* The sleep timeout has elapsed, so initialization proceeds. */
1951 	spdk_delay_us(2 * spdk_get_ticks_hz());
1952 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1953 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
1954 
1955 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1956 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
1957 
1958 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1959 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
1960 	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
1961 
1962 	/*
1963 	 * Transition to CSTS.RDY = 1.
1964 	 */
1965 	g_ut_nvme_regs.csts.bits.rdy = 1;
1966 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
1967 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
1968 
1969 	/*
1970 	 * Transition to READY.
1971 	 */
1972 	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
1973 		nvme_ctrlr_process_init(&ctrlr);
1974 	}
1975 
1976 	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
1977 	nvme_ctrlr_destruct(&ctrlr);
1978 }
1979 
1980 static void
1981 test_spdk_nvme_ctrlr_set_trid(void)
1982 {
1983 	struct spdk_nvme_ctrlr	ctrlr = {0};
1984 	struct spdk_nvme_transport_id	new_trid = {{0}};
1985 
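	/* spdk_nvme_ctrlr_set_trid() only succeeds on a failed controller, and the
	 * new trid must keep the same trtype and subnqn; only the address portion
	 * (traddr/trsvcid) may change.
	 */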
1986 	ctrlr.is_failed = false;
1987 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
1988 	snprintf(ctrlr.trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
1989 	snprintf(ctrlr.trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
1990 	snprintf(ctrlr.trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
1991 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EPERM);
1992 
1993 	ctrlr.is_failed = true;
1994 	new_trid.trtype = SPDK_NVME_TRANSPORT_TCP;
1995 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
1996 	CU_ASSERT(ctrlr.trid.trtype == SPDK_NVME_TRANSPORT_RDMA);
1997 
1998 	new_trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
1999 	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode2");
2000 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
2001 	CU_ASSERT(strncmp(ctrlr.trid.subnqn, "nqn.2016-06.io.spdk:cnode1", SPDK_NVMF_NQN_MAX_LEN) == 0);
2002 
2003 
2004 	snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
2005 	snprintf(new_trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
2006 	snprintf(new_trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4421");
2007 	CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == 0);
2008 	CU_ASSERT(strncmp(ctrlr.trid.traddr, "192.168.100.9", SPDK_NVMF_TRADDR_MAX_LEN) == 0);
2009 	CU_ASSERT(strncmp(ctrlr.trid.trsvcid, "4421", SPDK_NVMF_TRSVCID_MAX_LEN) == 0);
2010 }
2011 
2012 static void
2013 test_nvme_ctrlr_init_set_nvmf_ioccsz(void)
2014 {
2015 	DECLARE_AND_CONSTRUCT_CTRLR();
2016 	/* ioccsz is in 16-byte units: 260 * 16 = 4160 bytes, leaving 4096 bytes of in-capsule data after the 64-byte SQE. */
2017 	ctrlr.cdata.nvmf_specific.ioccsz = 260;
2018 	ctrlr.cdata.nvmf_specific.icdoff = 1;
2019 
2020 	/* Check the PCIe trtype; ioccsz and icdoff only apply to fabrics transports. */
2021 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
2022 
2023 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2024 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2025 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2026 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2027 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2028 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2029 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
2030 
2031 	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
2032 	CU_ASSERT(ctrlr.icdoff == 0);
2033 
2034 	nvme_ctrlr_destruct(&ctrlr);
2035 
2036 	/* Check the RDMA trtype. */
2037 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2038 
2039 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2040 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2041 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2042 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2043 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2044 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2045 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
2046 
2047 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2048 	CU_ASSERT(ctrlr.icdoff == 1);
2049 	ctrlr.ioccsz_bytes = 0;
2050 	ctrlr.icdoff = 0;
2051 
2052 	nvme_ctrlr_destruct(&ctrlr);
2053 
2054 	/* Check the TCP trtype. */
2055 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
2056 
2057 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2058 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2059 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2060 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2061 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2062 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2063 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
2064 
2065 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2066 	CU_ASSERT(ctrlr.icdoff == 1);
2067 	ctrlr.ioccsz_bytes = 0;
2068 	ctrlr.icdoff = 0;
2069 
2070 	nvme_ctrlr_destruct(&ctrlr);
2071 
2072 	/* Check the FC trtype. */
2073 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_FC;
2074 
2075 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2076 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2077 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2078 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2079 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2080 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2081 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
2082 
2083 	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
2084 	CU_ASSERT(ctrlr.icdoff == 1);
2085 	ctrlr.ioccsz_bytes = 0;
2086 	ctrlr.icdoff = 0;
2087 
2088 	nvme_ctrlr_destruct(&ctrlr);
2089 
2090 	/* Check the CUSTOM trtype; like PCIe, it does not use in-capsule data. */
2091 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;
2092 
2093 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2094 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2095 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2096 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2097 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2098 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
2099 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
2100 
2101 	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
2102 	CU_ASSERT(ctrlr.icdoff == 0);
2103 
2104 	nvme_ctrlr_destruct(&ctrlr);
2105 }
2106 
2107 static void
2108 test_nvme_ctrlr_init_set_num_queues(void)
2109 {
2110 	DECLARE_AND_CONSTRUCT_CTRLR();
2111 
2112 	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
2113 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_IDENTIFY_IOCS_SPECIFIC */
2114 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC);
2115 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_NUM_QUEUES */
2116 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
2117 
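	/* Completion dword 0 of Set Features "Number of Queues" carries the number
	 * of I/O submission queues allocated (NSQA) in bits 15:0 and of completion
	 * queues allocated (NCQA) in bits 31:16, both as zero-based counts.
	 */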
2118 	ctrlr.opts.num_io_queues = 64;
2119 	/* The allocated queue counts are zero-based, so 31 in each half of cdw0 yields 32 queues. */
2120 	fake_cpl.cdw0 = 31 + (31 << 16);
2121 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> CONSTRUCT_NS */
2122 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
2123 	CU_ASSERT(ctrlr.opts.num_io_queues == 32);
2124 	fake_cpl.cdw0 = 0;
2125 
2126 	nvme_ctrlr_destruct(&ctrlr);
2127 }
2128 
2129 static void
2130 test_nvme_ctrlr_init_set_keep_alive_timeout(void)
2131 {
2132 	DECLARE_AND_CONSTRUCT_CTRLR();
2133 
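	/* The driver issues Get Features "Keep Alive Timer" to learn the timeout
	 * the controller actually applied; the value comes back in cdw0 in
	 * milliseconds and replaces the requested one (120000 instead of 60000).
	 */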
2134 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2135 	ctrlr.cdata.kas = 1;
2136 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2137 	fake_cpl.cdw0 = 120000;
2138 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_HOST_ID */
2139 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_HOST_ID);
2140 	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 120000);
2141 	fake_cpl.cdw0 = 0;
2142 
2143 	/* Target does not support Get Features "Keep Alive Timer" */
2144 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2145 	ctrlr.cdata.kas = 1;
2146 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2147 	set_status_code = SPDK_NVME_SC_INVALID_FIELD;
2148 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_HOST_ID */
2149 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_HOST_ID);
2150 	CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 60000);
2151 	set_status_code = SPDK_NVME_SC_SUCCESS;
2152 
2153 	/* Target fails Get Features "Keep Alive Timer" for another reason */
2154 	ctrlr.opts.keep_alive_timeout_ms = 60000;
2155 	ctrlr.cdata.kas = 1;
2156 	ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
2157 	set_status_code = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2158 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> ERROR */
2159 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
2160 	set_status_code = SPDK_NVME_SC_SUCCESS;
2161 
2162 	nvme_ctrlr_destruct(&ctrlr);
2163 }
2164 
2165 int main(int argc, char **argv)
2166 {
2167 	CU_pSuite	suite = NULL;
2168 	unsigned int	num_failures;
2169 
2170 	CU_set_error_action(CUEA_ABORT);
2171 	CU_initialize_registry();
2172 
2173 	suite = CU_add_suite("nvme_ctrlr", NULL, NULL);
2174 
2175 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_0);
2176 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_1);
2177 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0);
2178 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_1);
2179 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_rr);
2180 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr);
2181 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_vs);
2182 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_delay);
2183 	CU_ADD_TEST(suite, test_alloc_io_qpair_rr_1);
2184 	CU_ADD_TEST(suite, test_ctrlr_get_default_ctrlr_opts);
2185 	CU_ADD_TEST(suite, test_ctrlr_get_default_io_qpair_opts);
2186 	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_1);
2187 	CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_2);
2188 	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_update_firmware);
2189 	CU_ADD_TEST(suite, test_nvme_ctrlr_fail);
2190 	CU_ADD_TEST(suite, test_nvme_ctrlr_construct_intel_support_log_page_list);
2191 	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_features);
2192 	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_doorbell_buffer_config);
2193 #if 0 /* TODO: move to PCIe-specific unit test */
2194 	CU_ADD_TEST(suite, test_nvme_ctrlr_alloc_cmb);
2195 #endif
2196 	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns);
2197 	CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns_error_case);
2198 	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_reconnect_io_qpair);
2199 	CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_set_trid);
2200 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_nvmf_ioccsz);
2201 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_num_queues);
2202 	CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_keep_alive_timeout);
2203 
2204 	CU_basic_set_mode(CU_BRM_VERBOSE);
2205 	CU_basic_run_tests();
2206 	num_failures = CU_get_number_of_failures();
2207 	CU_cleanup_registry();
2208 	return num_failures;
2209 }
2210