xref: /spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c (revision 78ecd30d8e4650007c80a053011116b85f3f17ae)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 #include "spdk_internal/mock.h"
38 #include "thread/thread_internal.h"
39 
40 #include "common/lib/ut_multithread.c"
41 #include "nvmf/ctrlr.c"
42 
43 SPDK_LOG_REGISTER_COMPONENT(nvmf)
44 
45 struct spdk_bdev {
46 	int ut_mock;
47 	uint64_t blockcnt;
48 };
49 
50 const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
51 const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";
52 
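/* Stub out ctrlr.c's external dependencies.  DEFINE_STUB()/DEFINE_STUB_V()
 * (from spdk_internal/mock.h) generate mock implementations that return the
 * given default values; individual tests override them with MOCK_SET() and
 * restore them with MOCK_CLEAR().
 */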
53 DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
54 	    struct spdk_nvmf_subsystem *,
55 	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
56 	    NULL);
57 
58 DEFINE_STUB(spdk_nvmf_poll_group_create,
59 	    struct spdk_nvmf_poll_group *,
60 	    (struct spdk_nvmf_tgt *tgt),
61 	    NULL);
62 
63 DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
64 	    const char *,
65 	    (const struct spdk_nvmf_subsystem *subsystem),
66 	    subsystem_default_sn);
67 
68 DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
69 	    const char *,
70 	    (const struct spdk_nvmf_subsystem *subsystem),
71 	    subsystem_default_mn);
72 
73 DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
74 	    bool,
75 	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
76 	    true);
77 
78 DEFINE_STUB(nvmf_subsystem_add_ctrlr,
79 	    int,
80 	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
81 	    0);
82 
83 DEFINE_STUB(nvmf_subsystem_get_ctrlr,
84 	    struct spdk_nvmf_ctrlr *,
85 	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
86 	    NULL);
87 
88 DEFINE_STUB(nvmf_ctrlr_dsm_supported,
89 	    bool,
90 	    (struct spdk_nvmf_ctrlr *ctrlr),
91 	    false);
92 
93 DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
94 	    bool,
95 	    (struct spdk_nvmf_ctrlr *ctrlr),
96 	    false);
97 
98 DEFINE_STUB_V(nvmf_get_discovery_log_page,
99 	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
100 	       uint32_t iovcnt, uint64_t offset, uint32_t length));
101 
102 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
103 	    int,
104 	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
105 	    0);
106 
107 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
108 	    bool,
109 	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
110 	    true);
111 
112 DEFINE_STUB(nvmf_subsystem_find_listener,
113 	    struct spdk_nvmf_subsystem_listener *,
114 	    (struct spdk_nvmf_subsystem *subsystem,
115 	     const struct spdk_nvme_transport_id *trid),
116 	    (void *)0x1);
117 
118 DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
119 	    int,
120 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
121 	     struct spdk_nvmf_request *req),
122 	    0);
123 
124 DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
125 	    int,
126 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
127 	     struct spdk_nvmf_request *req),
128 	    0);
129 
130 DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
131 	    int,
132 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
133 	     struct spdk_nvmf_request *req),
134 	    0);
135 
136 DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
137 	    int,
138 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
139 	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
140 	    0);
141 
142 DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
143 	    int,
144 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
145 	     struct spdk_nvmf_request *req),
146 	    0);
147 
148 DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
149 	    int,
150 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
151 	     struct spdk_nvmf_request *req),
152 	    0);
153 
154 DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
155 	    int,
156 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
157 	     struct spdk_nvmf_request *req),
158 	    0);
159 
160 DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
161 	    int,
162 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
163 	     struct spdk_nvmf_request *req),
164 	    0);
165 
166 DEFINE_STUB(nvmf_transport_req_complete,
167 	    int,
168 	    (struct spdk_nvmf_request *req),
169 	    0);
170 
171 DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
172 
173 DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
174 	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
175 	     struct spdk_dif_ctx *dif_ctx),
176 	    true);
177 
178 DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
179 	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
180 
181 DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
182 DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
183 
184 DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
185 		struct spdk_nvmf_ctrlr *ctrlr));
186 
187 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
188 	    int,
189 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
190 	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
191 	    0);
192 
193 DEFINE_STUB(nvmf_transport_req_free,
194 	    int,
195 	    (struct spdk_nvmf_request *req),
196 	    0);
197 
198 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
199 	    int,
200 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
201 	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
202 	    0);
203 
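/* Manual stub: disconnecting a qpair is a no-op in these tests. */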
204 int
205 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
206 {
207 	return 0;
208 }
209 
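/* Simplified Identify Namespace data for the tests: report the mock bdev's
 * block count and a single 512-byte LBA format.
 */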
210 void
211 nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
212 			    bool dif_insert_or_strip)
213 {
214 	uint64_t num_blocks;
215 
216 	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
217 	num_blocks = ns->bdev->blockcnt;
218 	nsdata->nsze = num_blocks;
219 	nsdata->ncap = num_blocks;
220 	nsdata->nuse = num_blocks;
221 	nsdata->nlbaf = 0;
222 	nsdata->flbas.format = 0;
223 	nsdata->lbaf[0].lbads = spdk_u32log2(512);
224 }
225 
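/* Namespace iteration helpers that walk the test subsystem's ns[] array,
 * which is indexed by NSID - 1.
 */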
226 struct spdk_nvmf_ns *
227 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
228 {
229 	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
230 	return subsystem->ns[0];
231 }
232 
233 struct spdk_nvmf_ns *
234 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
235 				struct spdk_nvmf_ns *prev_ns)
236 {
237 	uint32_t nsid;
238 
239 	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
240 	nsid = prev_ns->nsid;
241 
242 	if (nsid >= subsystem->max_nsid) {
243 		return NULL;
244 	}
245 	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
246 		if (subsystem->ns[nsid - 1]) {
247 			return subsystem->ns[nsid - 1];
248 		}
249 	}
250 	return NULL;
251 }
252 
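/* Exercise nvmf_ctrlr_get_log_page(): a fully valid request, then an invalid
 * log ID, a non-dword-aligned offset, and a missing data buffer.
 */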
253 static void
254 test_get_log_page(void)
255 {
256 	struct spdk_nvmf_subsystem subsystem = {};
257 	struct spdk_nvmf_request req = {};
258 	struct spdk_nvmf_qpair qpair = {};
259 	struct spdk_nvmf_ctrlr ctrlr = {};
260 	union nvmf_h2c_msg cmd = {};
261 	union nvmf_c2h_msg rsp = {};
262 	char data[4096];
263 
264 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
265 
266 	ctrlr.subsys = &subsystem;
267 
268 	qpair.ctrlr = &ctrlr;
269 
270 	req.qpair = &qpair;
271 	req.cmd = &cmd;
272 	req.rsp = &rsp;
273 	req.data = &data;
274 	req.length = sizeof(data);
275 
276 	/* Get Log Page - all valid */
277 	memset(&cmd, 0, sizeof(cmd));
278 	memset(&rsp, 0, sizeof(rsp));
279 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
280 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
281 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
282 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
283 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
284 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
285 
286 	/* Get Log Page with invalid log ID */
287 	memset(&cmd, 0, sizeof(cmd));
288 	memset(&rsp, 0, sizeof(rsp));
289 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
290 	cmd.nvme_cmd.cdw10 = 0;
291 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
292 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
293 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
294 
295 	/* Get Log Page with invalid offset (not dword aligned) */
296 	memset(&cmd, 0, sizeof(cmd));
297 	memset(&rsp, 0, sizeof(rsp));
298 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
299 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
300 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
301 	cmd.nvme_cmd.cdw12 = 2;
302 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
303 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
304 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
305 
306 	/* Get Log Page without data buffer */
307 	memset(&cmd, 0, sizeof(cmd));
308 	memset(&rsp, 0, sizeof(rsp));
309 	req.data = NULL;
310 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
311 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
312 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
313 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
314 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
315 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
316 	req.data = data;
317 }
318 
319 static void
320 test_process_fabrics_cmd(void)
321 {
322 	struct spdk_nvmf_request req = {};
323 	int ret;
324 	struct spdk_nvmf_qpair req_qpair = {};
325 	union nvmf_h2c_msg req_cmd = {};
326 	union nvmf_c2h_msg req_rsp = {};
327 
328 	req.qpair = &req_qpair;
329 	req.cmd = &req_cmd;
330 	req.rsp = &req_rsp;
331 	req.qpair->ctrlr = NULL;
332 
333 	/* A Property Get issued before CONNECT (no ctrlr yet) must fail with a command sequence error */
334 	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
335 	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
336 	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
337 	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
338 }
339 
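/* Return true if a completion carries a generic-success status. */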
340 static bool
341 nvme_status_success(const struct spdk_nvme_status *status)
342 {
343 	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
344 }
345 
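/* Exercise nvmf_ctrlr_cmd_connect() for admin and I/O queue CONNECT commands,
 * covering the success paths and the main validation failures (bad data length,
 * unknown subsystem, disallowed host, invalid queue sizes, duplicate QIDs, etc.).
 */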
346 static void
347 test_connect(void)
348 {
349 	struct spdk_nvmf_fabric_connect_data connect_data;
350 	struct spdk_nvmf_poll_group group;
351 	struct spdk_nvmf_subsystem_poll_group *sgroups;
352 	struct spdk_nvmf_transport transport;
353 	struct spdk_nvmf_transport_ops tops = {};
354 	struct spdk_nvmf_subsystem subsystem;
355 	struct spdk_nvmf_request req;
356 	struct spdk_nvmf_qpair admin_qpair;
357 	struct spdk_nvmf_qpair qpair;
358 	struct spdk_nvmf_qpair qpair2;
359 	struct spdk_nvmf_ctrlr ctrlr;
360 	struct spdk_nvmf_tgt tgt;
361 	union nvmf_h2c_msg cmd;
362 	union nvmf_c2h_msg rsp;
363 	const uint8_t hostid[16] = {
364 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
365 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
366 	};
367 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
368 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
369 	int rc;
370 
371 	memset(&group, 0, sizeof(group));
372 	group.thread = spdk_get_thread();
373 
374 	memset(&ctrlr, 0, sizeof(ctrlr));
375 	ctrlr.subsys = &subsystem;
376 	ctrlr.qpair_mask = spdk_bit_array_create(3);
377 	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
378 	ctrlr.vcprop.cc.bits.en = 1;
379 	ctrlr.vcprop.cc.bits.iosqes = 6;
380 	ctrlr.vcprop.cc.bits.iocqes = 4;
381 
382 	memset(&admin_qpair, 0, sizeof(admin_qpair));
383 	admin_qpair.group = &group;
384 	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
385 
386 	memset(&tgt, 0, sizeof(tgt));
387 	memset(&transport, 0, sizeof(transport));
388 	transport.ops = &tops;
389 	transport.opts.max_aq_depth = 32;
390 	transport.opts.max_queue_depth = 64;
391 	transport.opts.max_qpairs_per_ctrlr = 3;
392 	transport.tgt = &tgt;
393 
394 	memset(&qpair, 0, sizeof(qpair));
395 	qpair.transport = &transport;
396 	qpair.group = &group;
397 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
398 	TAILQ_INIT(&qpair.outstanding);
399 
400 	memset(&connect_data, 0, sizeof(connect_data));
401 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
402 	connect_data.cntlid = 0xFFFF;
403 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
404 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
405 
406 	memset(&subsystem, 0, sizeof(subsystem));
407 	subsystem.thread = spdk_get_thread();
408 	subsystem.id = 1;
409 	TAILQ_INIT(&subsystem.ctrlrs);
410 	subsystem.tgt = &tgt;
411 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
412 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
413 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
414 
415 	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
416 	group.sgroups = sgroups;
417 
418 	memset(&cmd, 0, sizeof(cmd));
419 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
420 	cmd.connect_cmd.cid = 1;
421 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
422 	cmd.connect_cmd.recfmt = 0;
423 	cmd.connect_cmd.qid = 0;
424 	cmd.connect_cmd.sqsize = 31;
425 	cmd.connect_cmd.cattr = 0;
426 	cmd.connect_cmd.kato = 120000;
427 
428 	memset(&req, 0, sizeof(req));
429 	req.qpair = &qpair;
430 	req.length = sizeof(connect_data);
431 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
432 	req.data = &connect_data;
433 	req.cmd = &cmd;
434 	req.rsp = &rsp;
435 
436 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
437 	MOCK_SET(spdk_nvmf_poll_group_create, &group);
438 
439 	/* Valid admin connect command */
440 	memset(&rsp, 0, sizeof(rsp));
441 	sgroups[subsystem.id].mgmt_io_outstanding++;
442 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
443 	rc = nvmf_ctrlr_cmd_connect(&req);
444 	poll_threads();
445 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
446 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
447 	CU_ASSERT(qpair.ctrlr != NULL);
448 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
449 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
450 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
451 	free(qpair.ctrlr);
452 	qpair.ctrlr = NULL;
453 
454 	/* Valid admin connect command with kato = 0 */
455 	cmd.connect_cmd.kato = 0;
456 	memset(&rsp, 0, sizeof(rsp));
457 	sgroups[subsystem.id].mgmt_io_outstanding++;
458 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
459 	rc = nvmf_ctrlr_cmd_connect(&req);
460 	poll_threads();
461 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
462 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
463 	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
464 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
465 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
466 	free(qpair.ctrlr);
467 	qpair.ctrlr = NULL;
468 	cmd.connect_cmd.kato = 120000;
469 
470 	/* Invalid data length */
471 	memset(&rsp, 0, sizeof(rsp));
472 	req.length = sizeof(connect_data) - 1;
473 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
474 	rc = nvmf_ctrlr_cmd_connect(&req);
475 	poll_threads();
476 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
477 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
478 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
479 	CU_ASSERT(qpair.ctrlr == NULL);
480 	req.length = sizeof(connect_data);
481 
482 	/* Invalid recfmt */
483 	memset(&rsp, 0, sizeof(rsp));
484 	cmd.connect_cmd.recfmt = 1234;
485 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
486 	rc = nvmf_ctrlr_cmd_connect(&req);
487 	poll_threads();
488 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
489 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
490 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
491 	CU_ASSERT(qpair.ctrlr == NULL);
492 	cmd.connect_cmd.recfmt = 0;
493 
494 	/* Subsystem not found */
495 	memset(&rsp, 0, sizeof(rsp));
496 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
497 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
498 	rc = nvmf_ctrlr_cmd_connect(&req);
499 	poll_threads();
500 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
501 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
502 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
503 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
504 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
505 	CU_ASSERT(qpair.ctrlr == NULL);
506 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
507 
508 	/* Unterminated hostnqn */
509 	memset(&rsp, 0, sizeof(rsp));
510 	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
511 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
512 	rc = nvmf_ctrlr_cmd_connect(&req);
513 	poll_threads();
514 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
515 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
516 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
517 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
518 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
519 	CU_ASSERT(qpair.ctrlr == NULL);
520 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
521 
522 	/* Host not allowed */
523 	memset(&rsp, 0, sizeof(rsp));
524 	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
525 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
526 	rc = nvmf_ctrlr_cmd_connect(&req);
527 	poll_threads();
528 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
529 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
530 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
531 	CU_ASSERT(qpair.ctrlr == NULL);
532 	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);
533 
534 	/* Invalid sqsize == 0 */
535 	memset(&rsp, 0, sizeof(rsp));
536 	cmd.connect_cmd.sqsize = 0;
537 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
538 	rc = nvmf_ctrlr_cmd_connect(&req);
539 	poll_threads();
540 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
541 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
542 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
543 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
544 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
545 	CU_ASSERT(qpair.ctrlr == NULL);
546 	cmd.connect_cmd.sqsize = 31;
547 
548 	/* Invalid admin sqsize > max_aq_depth */
549 	memset(&rsp, 0, sizeof(rsp));
550 	cmd.connect_cmd.sqsize = 32;
551 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
552 	rc = nvmf_ctrlr_cmd_connect(&req);
553 	poll_threads();
554 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
555 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
556 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
557 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
558 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
559 	CU_ASSERT(qpair.ctrlr == NULL);
560 	cmd.connect_cmd.sqsize = 31;
561 
562 	/* Invalid I/O sqsize > max_queue_depth */
563 	memset(&rsp, 0, sizeof(rsp));
564 	cmd.connect_cmd.qid = 1;
565 	cmd.connect_cmd.sqsize = 64;
566 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
567 	rc = nvmf_ctrlr_cmd_connect(&req);
568 	poll_threads();
569 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
570 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
571 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
572 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
573 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
574 	CU_ASSERT(qpair.ctrlr == NULL);
575 	cmd.connect_cmd.qid = 0;
576 	cmd.connect_cmd.sqsize = 31;
577 
578 	/* Invalid cntlid for admin queue */
579 	memset(&rsp, 0, sizeof(rsp));
580 	connect_data.cntlid = 0x1234;
581 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
582 	rc = nvmf_ctrlr_cmd_connect(&req);
583 	poll_threads();
584 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
585 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
586 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
587 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
588 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
589 	CU_ASSERT(qpair.ctrlr == NULL);
590 	connect_data.cntlid = 0xFFFF;
591 
592 	ctrlr.admin_qpair = &admin_qpair;
593 	ctrlr.subsys = &subsystem;
594 
595 	/* Valid I/O queue connect command */
596 	memset(&rsp, 0, sizeof(rsp));
597 	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
598 	cmd.connect_cmd.qid = 1;
599 	cmd.connect_cmd.sqsize = 63;
600 	sgroups[subsystem.id].mgmt_io_outstanding++;
601 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
602 	rc = nvmf_ctrlr_cmd_connect(&req);
603 	poll_threads();
604 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
605 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
606 	CU_ASSERT(qpair.ctrlr == &ctrlr);
607 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
608 	qpair.ctrlr = NULL;
609 	cmd.connect_cmd.sqsize = 31;
610 
611 	/* Non-existent controller */
612 	memset(&rsp, 0, sizeof(rsp));
613 	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
614 	sgroups[subsystem.id].mgmt_io_outstanding++;
615 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
616 	rc = nvmf_ctrlr_cmd_connect(&req);
617 	poll_threads();
618 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
619 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
620 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
621 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
622 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
623 	CU_ASSERT(qpair.ctrlr == NULL);
624 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
625 	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
626 
627 	/* I/O connect to discovery controller */
628 	memset(&rsp, 0, sizeof(rsp));
629 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
630 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
631 	sgroups[subsystem.id].mgmt_io_outstanding++;
632 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
633 	rc = nvmf_ctrlr_cmd_connect(&req);
634 	poll_threads();
635 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
636 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
637 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
638 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
639 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
640 	CU_ASSERT(qpair.ctrlr == NULL);
641 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
642 
643 	/* Admin queue connect to the discovery controller with keep-alive-timeout != 0 */
644 	cmd.connect_cmd.qid = 0;
645 	cmd.connect_cmd.kato = 120000;
646 	memset(&rsp, 0, sizeof(rsp));
647 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
648 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
649 	sgroups[subsystem.id].mgmt_io_outstanding++;
650 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
651 	rc = nvmf_ctrlr_cmd_connect(&req);
652 	poll_threads();
653 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
654 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
655 	CU_ASSERT(qpair.ctrlr != NULL);
656 	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
657 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
658 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
659 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
660 	free(qpair.ctrlr);
661 	qpair.ctrlr = NULL;
662 
663 	/* Admin queue connect to the discovery controller with keep-alive-timeout == 0.
664 	 * In that case a default keep-alive timeout is applied, so the poller still runs.
665 	 */
666 	cmd.connect_cmd.kato = 0;
667 	memset(&rsp, 0, sizeof(rsp));
668 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
669 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
670 	sgroups[subsystem.id].mgmt_io_outstanding++;
671 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
672 	rc = nvmf_ctrlr_cmd_connect(&req);
673 	poll_threads();
674 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
675 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
676 	CU_ASSERT(qpair.ctrlr != NULL);
677 	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
678 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
679 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
680 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
681 	free(qpair.ctrlr);
682 	qpair.ctrlr = NULL;
683 	cmd.connect_cmd.qid = 1;
684 	cmd.connect_cmd.kato = 120000;
685 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
686 
687 	/* I/O connect to disabled controller */
688 	memset(&rsp, 0, sizeof(rsp));
689 	ctrlr.vcprop.cc.bits.en = 0;
690 	sgroups[subsystem.id].mgmt_io_outstanding++;
691 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
692 	rc = nvmf_ctrlr_cmd_connect(&req);
693 	poll_threads();
694 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
695 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
696 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
697 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
698 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
699 	CU_ASSERT(qpair.ctrlr == NULL);
700 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
701 	ctrlr.vcprop.cc.bits.en = 1;
702 
703 	/* I/O connect with invalid IOSQES */
704 	memset(&rsp, 0, sizeof(rsp));
705 	ctrlr.vcprop.cc.bits.iosqes = 3;
706 	sgroups[subsystem.id].mgmt_io_outstanding++;
707 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
708 	rc = nvmf_ctrlr_cmd_connect(&req);
709 	poll_threads();
710 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
711 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
712 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
713 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
714 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
715 	CU_ASSERT(qpair.ctrlr == NULL);
716 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
717 	ctrlr.vcprop.cc.bits.iosqes = 6;
718 
719 	/* I/O connect with invalid IOCQES */
720 	memset(&rsp, 0, sizeof(rsp));
721 	ctrlr.vcprop.cc.bits.iocqes = 3;
722 	sgroups[subsystem.id].mgmt_io_outstanding++;
723 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
724 	rc = nvmf_ctrlr_cmd_connect(&req);
725 	poll_threads();
726 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
727 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
728 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
729 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
730 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
731 	CU_ASSERT(qpair.ctrlr == NULL);
732 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
733 	ctrlr.vcprop.cc.bits.iocqes = 4;
734 
735 	/* I/O connect with too many existing qpairs */
736 	memset(&rsp, 0, sizeof(rsp));
737 	spdk_bit_array_set(ctrlr.qpair_mask, 0);
738 	spdk_bit_array_set(ctrlr.qpair_mask, 1);
739 	spdk_bit_array_set(ctrlr.qpair_mask, 2);
740 	sgroups[subsystem.id].mgmt_io_outstanding++;
741 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
742 	rc = nvmf_ctrlr_cmd_connect(&req);
743 	poll_threads();
744 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
745 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
746 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
747 	CU_ASSERT(qpair.ctrlr == NULL);
748 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
749 	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
750 	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
751 	spdk_bit_array_clear(ctrlr.qpair_mask, 2);
752 
753 	/* I/O connect with duplicate queue ID */
754 	memset(&rsp, 0, sizeof(rsp));
755 	memset(&qpair2, 0, sizeof(qpair2));
756 	qpair2.group = &group;
757 	qpair2.qid = 1;
758 	spdk_bit_array_set(ctrlr.qpair_mask, 1);
759 	cmd.connect_cmd.qid = 1;
760 	sgroups[subsystem.id].mgmt_io_outstanding++;
761 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
762 	rc = nvmf_ctrlr_cmd_connect(&req);
763 	poll_threads();
764 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
765 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
766 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
767 	CU_ASSERT(qpair.ctrlr == NULL);
768 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
769 
770 	/* I/O connect when admin qpair is being destroyed */
771 	admin_qpair.group = NULL;
772 	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
773 	memset(&rsp, 0, sizeof(rsp));
774 	sgroups[subsystem.id].mgmt_io_outstanding++;
775 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
776 	rc = nvmf_ctrlr_cmd_connect(&req);
777 	poll_threads();
778 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
779 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
780 	CU_ASSERT(qpair.ctrlr == NULL);
781 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
782 	admin_qpair.group = &group;
783 	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
784 
785 	/* Clean up globals */
786 	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
787 	MOCK_CLEAR(spdk_nvmf_poll_group_create);
788 
789 	spdk_bit_array_free(&ctrlr.qpair_mask);
790 	free(sgroups);
791 }
792 
793 static void
794 test_get_ns_id_desc_list(void)
795 {
796 	struct spdk_nvmf_subsystem subsystem;
797 	struct spdk_nvmf_qpair qpair;
798 	struct spdk_nvmf_ctrlr ctrlr;
799 	struct spdk_nvmf_request req;
800 	struct spdk_nvmf_ns *ns_ptrs[1];
801 	struct spdk_nvmf_ns ns;
802 	union nvmf_h2c_msg cmd;
803 	union nvmf_c2h_msg rsp;
804 	struct spdk_bdev bdev;
805 	uint8_t buf[4096];
806 
807 	memset(&subsystem, 0, sizeof(subsystem));
808 	ns_ptrs[0] = &ns;
809 	subsystem.ns = ns_ptrs;
810 	subsystem.max_nsid = 1;
811 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
812 
813 	memset(&ns, 0, sizeof(ns));
814 	ns.opts.nsid = 1;
815 	ns.bdev = &bdev;
816 
817 	memset(&qpair, 0, sizeof(qpair));
818 	qpair.ctrlr = &ctrlr;
819 
820 	memset(&ctrlr, 0, sizeof(ctrlr));
821 	ctrlr.subsys = &subsystem;
822 	ctrlr.vcprop.cc.bits.en = 1;
823 
824 	memset(&req, 0, sizeof(req));
825 	req.qpair = &qpair;
826 	req.cmd = &cmd;
827 	req.rsp = &rsp;
828 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
829 	req.data = buf;
830 	req.length = sizeof(buf);
831 
832 	memset(&cmd, 0, sizeof(cmd));
833 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
834 	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
835 
836 	/* Invalid NSID */
837 	cmd.nvme_cmd.nsid = 0;
838 	memset(&rsp, 0, sizeof(rsp));
839 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
840 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
841 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
842 
843 	/* Valid NSID, but ns has no IDs defined */
844 	cmd.nvme_cmd.nsid = 1;
845 	memset(&rsp, 0, sizeof(rsp));
846 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
847 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
848 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
849 	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));
850 
851 	/* Valid NSID, only EUI64 defined */
852 	ns.opts.eui64[0] = 0x11;
853 	ns.opts.eui64[7] = 0xFF;
854 	memset(&rsp, 0, sizeof(rsp));
855 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
856 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
857 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
858 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
859 	CU_ASSERT(buf[1] == 8);
860 	CU_ASSERT(buf[4] == 0x11);
861 	CU_ASSERT(buf[11] == 0xFF);
862 	CU_ASSERT(buf[13] == 0);
863 
864 	/* Valid NSID, only NGUID defined */
865 	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
866 	ns.opts.nguid[0] = 0x22;
867 	ns.opts.nguid[15] = 0xEE;
868 	memset(&rsp, 0, sizeof(rsp));
869 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
870 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
871 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
872 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
873 	CU_ASSERT(buf[1] == 16);
874 	CU_ASSERT(buf[4] == 0x22);
875 	CU_ASSERT(buf[19] == 0xEE);
876 	CU_ASSERT(buf[21] == 0);
877 
878 	/* Valid NSID, both EUI64 and NGUID defined */
879 	ns.opts.eui64[0] = 0x11;
880 	ns.opts.eui64[7] = 0xFF;
881 	ns.opts.nguid[0] = 0x22;
882 	ns.opts.nguid[15] = 0xEE;
883 	memset(&rsp, 0, sizeof(rsp));
884 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
885 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
886 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
887 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
888 	CU_ASSERT(buf[1] == 8);
889 	CU_ASSERT(buf[4] == 0x11);
890 	CU_ASSERT(buf[11] == 0xFF);
891 	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
892 	CU_ASSERT(buf[13] == 16);
893 	CU_ASSERT(buf[16] == 0x22);
894 	CU_ASSERT(buf[31] == 0xEE);
895 	CU_ASSERT(buf[33] == 0);
896 
897 	/* Valid NSID, EUI64, NGUID, and UUID defined */
898 	ns.opts.eui64[0] = 0x11;
899 	ns.opts.eui64[7] = 0xFF;
900 	ns.opts.nguid[0] = 0x22;
901 	ns.opts.nguid[15] = 0xEE;
902 	ns.opts.uuid.u.raw[0] = 0x33;
903 	ns.opts.uuid.u.raw[15] = 0xDD;
904 	memset(&rsp, 0, sizeof(rsp));
905 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
906 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
907 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
908 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
909 	CU_ASSERT(buf[1] == 8);
910 	CU_ASSERT(buf[4] == 0x11);
911 	CU_ASSERT(buf[11] == 0xFF);
912 	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
913 	CU_ASSERT(buf[13] == 16);
914 	CU_ASSERT(buf[16] == 0x22);
915 	CU_ASSERT(buf[31] == 0xEE);
916 	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
917 	CU_ASSERT(buf[33] == 16);
918 	CU_ASSERT(buf[36] == 0x33);
919 	CU_ASSERT(buf[51] == 0xDD);
920 	CU_ASSERT(buf[53] == 0);
921 }
922 
923 static void
924 test_identify_ns(void)
925 {
926 	struct spdk_nvmf_subsystem subsystem = {};
927 	struct spdk_nvmf_transport transport = {};
928 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
929 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
930 	struct spdk_nvme_cmd cmd = {};
931 	struct spdk_nvme_cpl rsp = {};
932 	struct spdk_nvme_ns_data nsdata = {};
933 	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
934 	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
935 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
936 
937 	subsystem.ns = ns_arr;
938 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
939 
940 	/* Invalid NSID 0 */
941 	cmd.nsid = 0;
942 	memset(&nsdata, 0, sizeof(nsdata));
943 	memset(&rsp, 0, sizeof(rsp));
944 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
945 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
946 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
947 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
948 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
949 
950 	/* Valid NSID 1 */
951 	cmd.nsid = 1;
952 	memset(&nsdata, 0, sizeof(nsdata));
953 	memset(&rsp, 0, sizeof(rsp));
954 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
955 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
956 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
957 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
958 	CU_ASSERT(nsdata.nsze == 1234);
959 
960 	/* Valid but inactive NSID 2 */
961 	cmd.nsid = 2;
962 	memset(&nsdata, 0, sizeof(nsdata));
963 	memset(&rsp, 0, sizeof(rsp));
964 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
965 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
966 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
967 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
968 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
969 
970 	/* Valid NSID 3 */
971 	cmd.nsid = 3;
972 	memset(&nsdata, 0, sizeof(nsdata));
973 	memset(&rsp, 0, sizeof(rsp));
974 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
975 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
976 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
977 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
978 	CU_ASSERT(nsdata.nsze == 5678);
979 
980 	/* Invalid NSID 4 */
981 	cmd.nsid = 4;
982 	memset(&nsdata, 0, sizeof(nsdata));
983 	memset(&rsp, 0, sizeof(rsp));
984 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
985 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
986 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
987 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
988 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
989 
990 	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
991 	cmd.nsid = 0xFFFFFFFF;
992 	memset(&nsdata, 0, sizeof(nsdata));
993 	memset(&rsp, 0, sizeof(rsp));
994 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
995 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
996 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
997 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
998 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
999 }
1000 
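/* Exercise the Get/Set Features handlers for reservation persistence,
 * temperature threshold, and error recovery (DULBE).
 */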
1001 static void
1002 test_set_get_features(void)
1003 {
1004 	struct spdk_nvmf_subsystem subsystem = {};
1005 	struct spdk_nvmf_qpair admin_qpair = {};
1006 	struct spdk_nvmf_subsystem_listener listener = {};
1007 	struct spdk_nvmf_ctrlr ctrlr = {
1008 		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
1009 	};
1010 	union nvmf_h2c_msg cmd = {};
1011 	union nvmf_c2h_msg rsp = {};
1012 	struct spdk_nvmf_ns ns[3];
1013 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
1014 	struct spdk_nvmf_request req;
1015 	int rc;
1016 
1017 	subsystem.ns = ns_arr;
1018 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
1019 	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1020 	admin_qpair.ctrlr = &ctrlr;
1021 	req.qpair = &admin_qpair;
1022 	cmd.nvme_cmd.nsid = 1;
1023 	req.cmd = &cmd;
1024 	req.rsp = &rsp;
1025 
1026 	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1027 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1028 	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
1029 	ns[0].ptpl_file = "testcfg";
1030 	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
1031 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1032 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1033 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
1034 	CU_ASSERT(ns[0].ptpl_activated == true);
1035 
1036 	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1037 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1038 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
1039 	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
1040 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1041 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1042 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1043 	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
1044 
1045 
1046 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1047 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1048 	cmd.nvme_cmd.cdw11 = 0x42;
1049 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1050 
1051 	rc = nvmf_ctrlr_get_features(&req);
1052 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1053 
1054 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1055 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1056 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1057 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1058 
1059 	rc = nvmf_ctrlr_get_features(&req);
1060 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1061 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1062 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1063 
1064 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1065 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1066 	cmd.nvme_cmd.cdw11 = 0x42;
1067 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1068 
1069 	rc = nvmf_ctrlr_set_features(&req);
1070 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1071 
1072 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1073 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1074 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1075 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1076 
1077 	rc = nvmf_ctrlr_set_features(&req);
1078 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1079 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1080 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1081 
1082 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
1083 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1084 	cmd.nvme_cmd.cdw11 = 0x42;
1085 	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
1086 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1087 
1088 	rc = nvmf_ctrlr_set_features(&req);
1089 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1090 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1091 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1092 
1093 
1094 	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
1095 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1096 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1097 
1098 	rc = nvmf_ctrlr_get_features(&req);
1099 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1100 
1101 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
1102 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1103 	cmd.nvme_cmd.cdw11 = 0x42;
1104 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
1105 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1106 
1107 	rc = nvmf_ctrlr_set_features(&req);
1108 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1109 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1110 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1111 
1112 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
1113 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1114 	cmd.nvme_cmd.cdw11 = 0x42;
1115 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
1116 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1117 
1118 	rc = nvmf_ctrlr_set_features(&req);
1119 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1120 }
1121 
1122 /*
1123  * Reservation Unit Test Configuration
1124  *       --------             --------    --------
1125  *      | Host A |           | Host B |  | Host C |
1126  *       --------             --------    --------
1127  *      /        \               |           |
1128  *  --------   --------       -------     -------
1129  * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
1130  *  --------   --------       -------     -------
1131  *    \           \              /           /
1132  *     \           \            /           /
1133  *      \           \          /           /
1134  *      --------------------------------------
1135  *     |            NAMESPACE 1               |
1136  *      --------------------------------------
1137  */
1138 
1139 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
1140 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
1141 
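/* Build the topology from the diagram above: hosts A, B, and C are all
 * registrants of namespace 1 (host A via two controllers) and the reservation
 * type is set to rtype.  Each test case selects the reservation holder itself.
 */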
1142 static void
1143 ut_reservation_init(enum spdk_nvme_reservation_type rtype)
1144 {
1145 	/* Host A has two controllers */
1146 	spdk_uuid_generate(&g_ctrlr1_A.hostid);
1147 	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
1148 
1149 	/* Host B has 1 controller */
1150 	spdk_uuid_generate(&g_ctrlr_B.hostid);
1151 
1152 	/* Host C has 1 controller */
1153 	spdk_uuid_generate(&g_ctrlr_C.hostid);
1154 
1155 	memset(&g_ns_info, 0, sizeof(g_ns_info));
1156 	g_ns_info.rtype = rtype;
1157 	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
1158 	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
1159 	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
1160 }
1161 
1162 static void
1163 test_reservation_write_exclusive(void)
1164 {
1165 	struct spdk_nvmf_request req = {};
1166 	union nvmf_h2c_msg cmd = {};
1167 	union nvmf_c2h_msg rsp = {};
1168 	int rc;
1169 
1170 	req.cmd = &cmd;
1171 	req.rsp = &rsp;
1172 
1173 	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
1174 	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1175 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1176 
1177 	/* Test Case: Issue a Read command from Host A and Host B */
1178 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1179 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1180 	SPDK_CU_ASSERT_FATAL(rc == 0);
1181 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1182 	SPDK_CU_ASSERT_FATAL(rc == 0);
1183 
1184 	/* Test Case: Issue a DSM Write command from Host A and Host B */
1185 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1186 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1187 	SPDK_CU_ASSERT_FATAL(rc == 0);
1188 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1189 	SPDK_CU_ASSERT_FATAL(rc < 0);
1190 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1191 
1192 	/* Test Case: Issue a Write command from Host C */
1193 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1194 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1195 	SPDK_CU_ASSERT_FATAL(rc < 0);
1196 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1197 
1198 	/* Test Case: Issue a Read command from Host B */
1199 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1200 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1201 	SPDK_CU_ASSERT_FATAL(rc == 0);
1202 
1203 	/* Unregister Host C */
1204 	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
1205 
1206 	/* Test Case: Read and Write commands from non-registrant Host C */
1207 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1208 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1209 	SPDK_CU_ASSERT_FATAL(rc < 0);
1210 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1211 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1212 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1213 	SPDK_CU_ASSERT_FATAL(rc == 0);
1214 }
1215 
1216 static void
1217 test_reservation_exclusive_access(void)
1218 {
1219 	struct spdk_nvmf_request req = {};
1220 	union nvmf_h2c_msg cmd = {};
1221 	union nvmf_c2h_msg rsp = {};
1222 	int rc;
1223 
1224 	req.cmd = &cmd;
1225 	req.rsp = &rsp;
1226 
1227 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1228 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1229 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1230 
1231 	/* Test Case: Issue a Read command from Host B */
1232 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1233 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1234 	SPDK_CU_ASSERT_FATAL(rc < 0);
1235 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1236 
1237 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1238 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1239 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1240 	SPDK_CU_ASSERT_FATAL(rc == 0);
1241 }
1242 
1243 static void
1244 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1245 {
1246 	struct spdk_nvmf_request req = {};
1247 	union nvmf_h2c_msg cmd = {};
1248 	union nvmf_c2h_msg rsp = {};
1249 	int rc;
1250 
1251 	req.cmd = &cmd;
1252 	req.rsp = &rsp;
1253 
1254 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1255 	ut_reservation_init(rtype);
1256 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1257 
1258 	/* Test Case: Issue a Read command from Host A and Host C */
1259 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1260 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1261 	SPDK_CU_ASSERT_FATAL(rc == 0);
1262 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1263 	SPDK_CU_ASSERT_FATAL(rc == 0);
1264 
1265 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1266 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1267 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1268 	SPDK_CU_ASSERT_FATAL(rc == 0);
1269 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1270 	SPDK_CU_ASSERT_FATAL(rc == 0);
1271 
1272 	/* Unregister Host C */
1273 	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
1274 
1275 	/* Test Case: Read and Write commands from non-registrant Host C */
1276 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1277 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1278 	SPDK_CU_ASSERT_FATAL(rc == 0);
1279 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1280 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1281 	SPDK_CU_ASSERT_FATAL(rc < 0);
1282 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1283 }
1284 
1285 static void
1286 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1287 {
1288 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1289 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1290 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1291 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1292 }
1293 
1294 static void
1295 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1296 {
1297 	struct spdk_nvmf_request req = {};
1298 	union nvmf_h2c_msg cmd = {};
1299 	union nvmf_c2h_msg rsp = {};
1300 	int rc;
1301 
1302 	req.cmd = &cmd;
1303 	req.rsp = &rsp;
1304 
1305 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1306 	ut_reservation_init(rtype);
1307 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1308 
1309 	/* Test Case: Issue a Write command from Host B */
1310 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1311 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1312 	SPDK_CU_ASSERT_FATAL(rc == 0);
1313 
1314 	/* Unregister Host B */
1315 	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));
1316 
1317 	/* Test Case: Issue a Read command from Host B */
1318 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1319 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1320 	SPDK_CU_ASSERT_FATAL(rc < 0);
1321 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1322 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1323 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1324 	SPDK_CU_ASSERT_FATAL(rc < 0);
1325 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1326 }
1327 
1328 static void
1329 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1330 {
1331 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1332 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1333 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1334 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1335 }
1336 
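/* Helpers for the controller's pending async event list used by the
 * reservation notification test below.
 */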
1337 static void
1338 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1339 {
1340 	STAILQ_INIT(&ctrlr->async_events);
1341 }
1342 
1343 static void
1344 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1345 {
1346 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1347 
1348 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1349 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1350 		free(event);
1351 	}
1352 }
1353 
1354 static int
1355 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1356 {
1357 	int num = 0;
1358 	struct spdk_nvmf_async_event_completion *event;
1359 
1360 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1361 		num++;
1362 	}
1363 	return num;
1364 }
1365 
1366 static void
1367 test_reservation_notification_log_page(void)
1368 {
1369 	struct spdk_nvmf_ctrlr ctrlr;
1370 	struct spdk_nvmf_qpair qpair;
1371 	struct spdk_nvmf_ns ns;
1372 	struct spdk_nvmf_request req;
1373 	union nvmf_h2c_msg cmd = {};
1374 	union nvmf_c2h_msg rsp = {};
1375 	union spdk_nvme_async_event_completion event = {};
1376 	struct spdk_nvme_reservation_notification_log logs[3];
1377 	struct iovec iov;
1378 
1379 	memset(&ctrlr, 0, sizeof(ctrlr));
1380 	ctrlr.thread = spdk_get_thread();
1381 	TAILQ_INIT(&ctrlr.log_head);
1382 	init_pending_async_events(&ctrlr);
1383 	ns.nsid = 1;
1384 
1385 	/* Test Case: Mask all the reservation notifications */
1386 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1387 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1388 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1389 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1390 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1391 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1392 					  SPDK_NVME_RESERVATION_RELEASED);
1393 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1394 					  SPDK_NVME_RESERVATION_PREEMPTED);
1395 	poll_threads();
1396 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1397 
1398 	/* Test Case: Unmask all the reservation notifications;
1399 	 * 3 log pages are generated and an AER completion is triggered.
1400 	 */
1401 	ns.mask = 0;
1402 	ctrlr.num_avail_log_pages = 0;
1403 	req.cmd = &cmd;
1404 	req.rsp = &rsp;
1405 	ctrlr.aer_req[0] = &req;
1406 	ctrlr.nr_aer_reqs = 1;
1407 	req.qpair = &qpair;
1408 	TAILQ_INIT(&qpair.outstanding);
1409 	qpair.ctrlr = NULL;
1410 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1411 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1412 
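	/* With one AER outstanding, the first unmasked notification completes it;
	 * all three notifications are retained as available log pages.
	 */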
1413 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1414 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1415 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1416 					  SPDK_NVME_RESERVATION_RELEASED);
1417 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1418 					  SPDK_NVME_RESERVATION_PREEMPTED);
1419 	poll_threads();
1420 	event.raw = rsp.nvme_cpl.cdw0;
1421 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1422 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1423 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1424 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1425 
1426 	/* Test Case: Get Log Page to clear the log pages */
1427 	iov.iov_base = &logs[0];
1428 	iov.iov_len = sizeof(logs);
1429 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1430 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1431 
1432 	cleanup_pending_async_events(&ctrlr);
1433 }
1434 
1435 static void
1436 test_get_dif_ctx(void)
1437 {
1438 	struct spdk_nvmf_subsystem subsystem = {};
1439 	struct spdk_nvmf_request req = {};
1440 	struct spdk_nvmf_qpair qpair = {};
1441 	struct spdk_nvmf_ctrlr ctrlr = {};
1442 	struct spdk_nvmf_ns ns = {};
1443 	struct spdk_nvmf_ns *_ns = NULL;
1444 	struct spdk_bdev bdev = {};
1445 	union nvmf_h2c_msg cmd = {};
1446 	struct spdk_dif_ctx dif_ctx = {};
1447 	bool ret;
1448 
1449 	ctrlr.subsys = &subsystem;
1450 
1451 	qpair.ctrlr = &ctrlr;
1452 
1453 	req.qpair = &qpair;
1454 	req.cmd = &cmd;
1455 
1456 	ns.bdev = &bdev;
1457 
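	/* Each step below satisfies one more precondition; DIF context extraction
	 * should fail until DIF is enabled, the qpair is an active I/O queue, the
	 * opcode transfers data (e.g. Write), and the NSID maps to a valid namespace.
	 */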
1458 	ctrlr.dif_insert_or_strip = false;
1459 
1460 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1461 	CU_ASSERT(ret == false);
1462 
1463 	ctrlr.dif_insert_or_strip = true;
1464 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1465 
1466 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1467 	CU_ASSERT(ret == false);
1468 
1469 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1470 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1471 
1472 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1473 	CU_ASSERT(ret == false);
1474 
1475 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1476 
1477 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1478 	CU_ASSERT(ret == false);
1479 
1480 	qpair.qid = 1;
1481 
1482 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1483 	CU_ASSERT(ret == false);
1484 
1485 	cmd.nvme_cmd.nsid = 1;
1486 
1487 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1488 	CU_ASSERT(ret == false);
1489 
1490 	subsystem.max_nsid = 1;
1491 	subsystem.ns = &_ns;
1492 	subsystem.ns[0] = &ns;
1493 
1494 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1495 	CU_ASSERT(ret == false);
1496 
1497 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1498 
1499 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1500 	CU_ASSERT(ret == true);
1501 }
1502 
1503 static void
1504 test_identify_ctrlr(void)
1505 {
1506 	struct spdk_nvmf_tgt tgt = {};
1507 	struct spdk_nvmf_subsystem subsystem = {
1508 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1509 		.tgt = &tgt,
1510 	};
1511 	struct spdk_nvmf_transport_ops tops = {};
1512 	struct spdk_nvmf_transport transport = {
1513 		.ops = &tops,
1514 		.opts = {
1515 			.in_capsule_data_size = 4096,
1516 		},
1517 	};
1518 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1519 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1520 	struct spdk_nvme_ctrlr_data cdata = {};
1521 	uint32_t expected_ioccsz;
1522 
1523 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1524 
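	/* IOCCSZ is reported in 16-byte units: the SQE size plus the transport's
	 * in-capsule data size.
	 */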
1525 	/* Check ioccsz, TCP transport */
1526 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1527 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1528 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1529 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1530 
1531 	/* Check ioccsz, RDMA transport */
1532 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1533 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1534 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1535 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1536 
1537 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1538 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1539 	ctrlr.dif_insert_or_strip = true;
1540 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1541 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1542 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1543 }
1544 
1545 static int
1546 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1547 {
1548 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1549 
1550 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1551 }
1552 
1553 static void
1554 test_custom_admin_cmd(void)
1555 {
1556 	struct spdk_nvmf_subsystem subsystem;
1557 	struct spdk_nvmf_qpair qpair;
1558 	struct spdk_nvmf_ctrlr ctrlr;
1559 	struct spdk_nvmf_request req;
1560 	struct spdk_nvmf_ns *ns_ptrs[1];
1561 	struct spdk_nvmf_ns ns;
1562 	union nvmf_h2c_msg cmd;
1563 	union nvmf_c2h_msg rsp;
1564 	struct spdk_bdev bdev;
1565 	uint8_t buf[4096];
1566 	int rc;
1567 
1568 	memset(&subsystem, 0, sizeof(subsystem));
1569 	ns_ptrs[0] = &ns;
1570 	subsystem.ns = ns_ptrs;
1571 	subsystem.max_nsid = 1;
1572 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1573 
1574 	memset(&ns, 0, sizeof(ns));
1575 	ns.opts.nsid = 1;
1576 	ns.bdev = &bdev;
1577 
1578 	memset(&qpair, 0, sizeof(qpair));
1579 	qpair.ctrlr = &ctrlr;
1580 
1581 	memset(&ctrlr, 0, sizeof(ctrlr));
1582 	ctrlr.subsys = &subsystem;
1583 	ctrlr.vcprop.cc.bits.en = 1;
1584 
1585 	memset(&req, 0, sizeof(req));
1586 	req.qpair = &qpair;
1587 	req.cmd = &cmd;
1588 	req.rsp = &rsp;
1589 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1590 	req.data = buf;
1591 	req.length = sizeof(buf);
1592 
1593 	memset(&cmd, 0, sizeof(cmd));
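	/* 0xc1 falls in the vendor-specific admin opcode range (0xC0-0xFF) */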
1594 	cmd.nvme_cmd.opc = 0xc1;
1595 	cmd.nvme_cmd.nsid = 0;
1596 	memset(&rsp, 0, sizeof(rsp));
1597 
1598 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1599 
1600 	/* Ensure that our custom handler is called */
1601 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1602 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1603 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1604 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1605 }
1606 
1607 static void
1608 test_fused_compare_and_write(void)
1609 {
1610 	struct spdk_nvmf_request req = {};
1611 	struct spdk_nvmf_qpair qpair = {};
1612 	struct spdk_nvme_cmd cmd = {};
1613 	union nvmf_c2h_msg rsp = {};
1614 	struct spdk_nvmf_ctrlr ctrlr = {};
1615 	struct spdk_nvmf_subsystem subsystem = {};
1616 	struct spdk_nvmf_ns ns = {};
1617 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1618 	struct spdk_nvmf_subsystem_listener listener = {};
1619 	struct spdk_bdev bdev = {};
1620 
1621 	struct spdk_nvmf_poll_group group = {};
1622 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1623 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1624 	struct spdk_io_channel io_ch = {};
1625 
1626 	ns.bdev = &bdev;
1627 
1628 	subsystem.id = 0;
1629 	subsystem.max_nsid = 1;
1630 	subsys_ns[0] = &ns;
1631 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1632 
1633 	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1634 
1635 	/* Enable controller */
1636 	ctrlr.vcprop.cc.bits.en = 1;
1637 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1638 	ctrlr.listener = &listener;
1639 
1640 	group.num_sgroups = 1;
1641 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1642 	sgroups.num_ns = 1;
1643 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1644 	ns_info.channel = &io_ch;
1645 	sgroups.ns_info = &ns_info;
1646 	TAILQ_INIT(&sgroups.queued);
1647 	group.sgroups = &sgroups;
1648 	TAILQ_INIT(&qpair.outstanding);
1649 
1650 	qpair.ctrlr = &ctrlr;
1651 	qpair.group = &group;
1652 	qpair.qid = 1;
1653 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1654 
1655 	cmd.nsid = 1;
1656 
1657 	req.qpair = &qpair;
1658 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1659 	req.rsp = &rsp;
1660 
1661 	/* Valid sequence: FUSE_FIRST Compare followed by FUSE_SECOND Write, both succeed */
1662 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1663 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1664 
1665 	spdk_nvmf_request_exec(&req);
1666 	CU_ASSERT(qpair.first_fused_req != NULL);
1667 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1668 
1669 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1670 	cmd.opc = SPDK_NVME_OPC_WRITE;
1671 
1672 	spdk_nvmf_request_exec(&req);
1673 	CU_ASSERT(qpair.first_fused_req == NULL);
1674 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1675 
1676 	/* Wrong sequence */
1677 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1678 	cmd.opc = SPDK_NVME_OPC_WRITE;
1679 
1680 	spdk_nvmf_request_exec(&req);
1681 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1682 	CU_ASSERT(qpair.first_fused_req == NULL);
1683 
1684 	/* Write as FUSE_FIRST (wrong opcode - FUSE_FIRST must be a Compare) */
1685 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1686 	cmd.opc = SPDK_NVME_OPC_WRITE;
1687 
1688 	spdk_nvmf_request_exec(&req);
1689 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1690 	CU_ASSERT(qpair.first_fused_req == NULL);
1691 
1692 	/* Valid FUSE_FIRST Compare followed by Compare as FUSE_SECOND (wrong opcode - FUSE_SECOND must be a Write) */
1693 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1694 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1695 
1696 	spdk_nvmf_request_exec(&req);
1697 	CU_ASSERT(qpair.first_fused_req != NULL);
1698 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1699 
1700 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1701 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1702 
1703 	spdk_nvmf_request_exec(&req);
1704 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1705 	CU_ASSERT(qpair.first_fused_req == NULL);
1706 }
1707 
1708 static void
1709 test_multi_async_event_reqs(void)
1710 {
1711 	struct spdk_nvmf_subsystem subsystem = {};
1712 	struct spdk_nvmf_qpair qpair = {};
1713 	struct spdk_nvmf_ctrlr ctrlr = {};
1714 	struct spdk_nvmf_request req[5] = {};
1715 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1716 	struct spdk_nvmf_ns ns = {};
1717 	union nvmf_h2c_msg cmd[5] = {};
1718 	union nvmf_c2h_msg rsp[5] = {};
1719 
1720 	struct spdk_nvmf_poll_group group = {};
1721 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1722 
1723 	int i;
1724 
1725 	ns_ptrs[0] = &ns;
1726 	subsystem.ns = ns_ptrs;
1727 	subsystem.max_nsid = 1;
1728 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1729 
1730 	ns.opts.nsid = 1;
1731 	group.sgroups = &sgroups;
1732 
1733 	qpair.ctrlr = &ctrlr;
1734 	qpair.group = &group;
1735 	TAILQ_INIT(&qpair.outstanding);
1736 
1737 	ctrlr.subsys = &subsystem;
1738 	ctrlr.vcprop.cc.bits.en = 1;
1739 
1740 	for (i = 0; i < 5; i++) {
1741 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1742 		cmd[i].nvme_cmd.nsid = 1;
1743 		cmd[i].nvme_cmd.cid = i;
1744 
1745 		req[i].qpair = &qpair;
1746 		req[i].cmd = &cmd[i];
1747 		req[i].rsp = &rsp[i];
1748 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1749 	}
1750 
1751 	/* The target can store up to NVMF_MAX_ASYNC_EVENTS AER requests */
1752 	sgroups.mgmt_io_outstanding = NVMF_MAX_ASYNC_EVENTS;
1753 	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
1754 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1755 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
1756 	}
1757 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1758 
1759 	/* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
1760 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1761 	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
1762 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1763 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
1764 
1765 	/* Test that the aer_reqs array stays contiguous when a request in the middle is aborted */
1766 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
1767 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1768 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1769 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
1770 
1771 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
1772 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1773 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1774 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
1775 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
1776 
1777 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
1778 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
1779 }
1780 
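/* Each ANA group descriptor carries a single NSID; the ANA log page under test
 * is a header followed by three such descriptors.
 */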
1781 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
1782 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
1783 static void
1784 test_get_ana_log_page(void)
1785 {
1786 	struct spdk_nvmf_subsystem subsystem = {};
1787 	struct spdk_nvmf_ctrlr ctrlr = {};
1788 	struct spdk_nvmf_subsystem_listener listener = {};
1789 	struct spdk_nvmf_ns ns[3];
1790 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
1791 	uint64_t offset;
1792 	uint32_t length;
1793 	int i;
1794 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1795 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1796 	struct iovec iov, iovs[2];
1797 	struct spdk_nvme_ana_page *ana_hdr;
1798 	char _ana_desc[UT_ANA_DESC_SIZE];
1799 	struct spdk_nvme_ana_group_descriptor *ana_desc;
1800 
1801 	subsystem.ns = ns_arr;
1802 	subsystem.max_nsid = 3;
1803 	ctrlr.subsys = &subsystem;
1804 	ctrlr.listener = &listener;
1805 	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1806 
1807 	for (i = 0; i < 3; i++) {
1808 		ns_arr[i]->nsid = i + 1;
1809 	}
1810 
1811 	/* create expected page */
1812 	ana_hdr = (void *)&expected_page[0];
1813 	ana_hdr->num_ana_group_desc = 3;
1814 	ana_hdr->change_count = 0;
1815 
1816 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
1817 	ana_desc = (void *)_ana_desc;
1818 	offset = sizeof(struct spdk_nvme_ana_page);
1819 
1820 	for (i = 0; i < 3; i++) {
1821 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
1822 		ana_desc->ana_group_id = ns_arr[i]->nsid;
1823 		ana_desc->num_of_nsid = 1;
1824 		ana_desc->change_count = 0;
1825 		ana_desc->ana_state = ctrlr.listener->ana_state;
1826 		ana_desc->nsid[0] = ns_arr[i]->nsid;
1827 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
1828 		offset += UT_ANA_DESC_SIZE;
1829 	}
1830 
1831 	/* read the entire actual log page in 16-byte chunks to exercise offset handling */
1832 	offset = 0;
1833 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
1834 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
1835 		iov.iov_base = &actual_page[offset];
1836 		iov.iov_len = length;
1837 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
1838 		offset += length;
1839 	}
1840 
1841 	/* compare expected page and actual page */
1842 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1843 
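	/* Read again using two iovecs whose boundary falls inside the last
	 * descriptor, to exercise the scattered copy path.
	 */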
1844 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
1845 	offset = 0;
1846 	iovs[0].iov_base = &actual_page[offset];
1847 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
1848 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
1849 	iovs[1].iov_base = &actual_page[offset];
1850 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
1851 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
1852 
1853 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1854 }
1855 
1856 static void
1857 test_multi_async_events(void)
1858 {
1859 	struct spdk_nvmf_subsystem subsystem = {};
1860 	struct spdk_nvmf_qpair qpair = {};
1861 	struct spdk_nvmf_ctrlr ctrlr = {};
1862 	struct spdk_nvmf_request req[4] = {};
1863 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1864 	struct spdk_nvmf_ns ns = {};
1865 	union nvmf_h2c_msg cmd[4] = {};
1866 	union nvmf_c2h_msg rsp[4] = {};
1867 	union spdk_nvme_async_event_completion event = {};
1868 	struct spdk_nvmf_poll_group group = {};
1869 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1870 	int i;
1871 
1872 	ns_ptrs[0] = &ns;
1873 	subsystem.ns = ns_ptrs;
1874 	subsystem.max_nsid = 1;
1875 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1876 
1877 	ns.opts.nsid = 1;
1878 	group.sgroups = &sgroups;
1879 
1880 	qpair.ctrlr = &ctrlr;
1881 	qpair.group = &group;
1882 	TAILQ_INIT(&qpair.outstanding);
1883 
1884 	ctrlr.subsys = &subsystem;
1885 	ctrlr.vcprop.cc.bits.en = 1;
1886 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
1887 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
1888 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
1889 	init_pending_async_events(&ctrlr);
1890 
1891 	/* The target queues pending events when there is no outstanding AER request */
1892 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1893 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
1894 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
1895 
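	/* The first three AERs complete immediately with the queued events;
	 * the fourth finds no pending event and stays outstanding.
	 */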
1896 	for (i = 0; i < 4; i++) {
1897 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1898 		cmd[i].nvme_cmd.nsid = 1;
1899 		cmd[i].nvme_cmd.cid = i;
1900 
1901 		req[i].qpair = &qpair;
1902 		req[i].cmd = &cmd[i];
1903 		req[i].rsp = &rsp[i];
1904 
1905 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1906 
1907 		sgroups.mgmt_io_outstanding = 1;
1908 		if (i < 3) {
1909 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1910 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1911 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
1912 		} else {
1913 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1914 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1915 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
1916 		}
1917 	}
1918 
1919 	event.raw = rsp[0].nvme_cpl.cdw0;
1920 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
1921 	event.raw = rsp[1].nvme_cpl.cdw0;
1922 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
1923 	event.raw = rsp[2].nvme_cpl.cdw0;
1924 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
1925 
1926 	cleanup_pending_async_events(&ctrlr);
1927 }
1928 
1929 static void
1930 test_rae(void)
1931 {
1932 	struct spdk_nvmf_subsystem subsystem = {};
1933 	struct spdk_nvmf_qpair qpair = {};
1934 	struct spdk_nvmf_ctrlr ctrlr = {};
1935 	struct spdk_nvmf_request req[3] = {};
1936 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1937 	struct spdk_nvmf_ns ns = {};
1938 	union nvmf_h2c_msg cmd[3] = {};
1939 	union nvmf_c2h_msg rsp[3] = {};
1940 	union spdk_nvme_async_event_completion event = {};
1941 	struct spdk_nvmf_poll_group group = {};
1942 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1943 	int i;
1944 	char data[4096];
1945 
1946 	ns_ptrs[0] = &ns;
1947 	subsystem.ns = ns_ptrs;
1948 	subsystem.max_nsid = 1;
1949 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1950 
1951 	ns.opts.nsid = 1;
1952 	group.sgroups = &sgroups;
1953 
1954 	qpair.ctrlr = &ctrlr;
1955 	qpair.group = &group;
1956 	TAILQ_INIT(&qpair.outstanding);
1957 
1958 	ctrlr.subsys = &subsystem;
1959 	ctrlr.vcprop.cc.bits.en = 1;
1960 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
1961 	init_pending_async_events(&ctrlr);
1962 
1963 	/* The target queues pending events when there is no outstanding AER request */
1964 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1965 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1966 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1967 	/* only one event will be queued before RAE is cleared */
1968 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
1969 
1970 	req[0].qpair = &qpair;
1971 	req[0].cmd = &cmd[0];
1972 	req[0].rsp = &rsp[0];
1973 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1974 	cmd[0].nvme_cmd.nsid = 1;
1975 	cmd[0].nvme_cmd.cid = 0;
1976 
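	/* req[1] and req[2] are Get Log Page (Changed Namespace List) commands;
	 * req[1] retains the async event (RAE=1) and req[2] clears it (RAE=0).
	 */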
1977 	for (i = 1; i < 3; i++) {
1978 		req[i].qpair = &qpair;
1979 		req[i].cmd = &cmd[i];
1980 		req[i].rsp = &rsp[i];
1981 		req[i].data = &data;
1982 		req[i].length = sizeof(data);
1983 
1984 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
1985 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
1986 			SPDK_NVME_LOG_CHANGED_NS_LIST;
1987 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
1988 			spdk_nvme_bytes_to_numd(req[i].length);
1989 		cmd[i].nvme_cmd.cid = i;
1990 	}
1991 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
1992 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
1993 
1994 	/* consume the pending event */
1995 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
1996 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1997 	event.raw = rsp[0].nvme_cpl.cdw0;
1998 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
1999 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2000 
2001 	/* get log with RAE set */
2002 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2003 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2004 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2005 
2006 	/* will not generate a new event until RAE is cleared */
2007 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2008 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2009 
2010 	/* get log with RAE clear */
2011 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2012 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2013 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2014 
2015 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2016 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2017 
2018 	cleanup_pending_async_events(&ctrlr);
2019 }
2020 
2021 static void
2022 test_nvmf_ctrlr_create_destruct(void)
2023 {
2024 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2025 	struct spdk_nvmf_poll_group group = {};
2026 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2027 	struct spdk_nvmf_transport transport = {};
2028 	struct spdk_nvmf_transport_ops tops = {};
2029 	struct spdk_nvmf_subsystem subsystem = {};
2030 	struct spdk_nvmf_request req = {};
2031 	struct spdk_nvmf_qpair qpair = {};
2032 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2033 	struct spdk_nvmf_tgt tgt = {};
2034 	union nvmf_h2c_msg cmd = {};
2035 	union nvmf_c2h_msg rsp = {};
2036 	const uint8_t hostid[16] = {
2037 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2038 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2039 	};
2040 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2041 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2042 
2043 	group.thread = spdk_get_thread();
2044 	transport.ops = &tops;
2045 	transport.opts.max_aq_depth = 32;
2046 	transport.opts.max_queue_depth = 64;
2047 	transport.opts.max_qpairs_per_ctrlr = 3;
2048 	transport.opts.dif_insert_or_strip = true;
2049 	transport.tgt = &tgt;
2050 	qpair.transport = &transport;
2051 	qpair.group = &group;
2052 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2053 	TAILQ_INIT(&qpair.outstanding);
2054 
2055 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2056 	connect_data.cntlid = 0xFFFF;
2057 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2058 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2059 
2060 	subsystem.thread = spdk_get_thread();
2061 	subsystem.id = 1;
2062 	TAILQ_INIT(&subsystem.ctrlrs);
2063 	subsystem.tgt = &tgt;
2064 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2065 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2066 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2067 
2068 	group.sgroups = sgroups;
2069 
2070 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2071 	cmd.connect_cmd.cid = 1;
2072 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2073 	cmd.connect_cmd.recfmt = 0;
2074 	cmd.connect_cmd.qid = 0;
2075 	cmd.connect_cmd.sqsize = 31;
2076 	cmd.connect_cmd.cattr = 0;
2077 	cmd.connect_cmd.kato = 120000;
2078 
2079 	req.qpair = &qpair;
2080 	req.length = sizeof(connect_data);
2081 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2082 	req.data = &connect_data;
2083 	req.cmd = &cmd;
2084 	req.rsp = &rsp;
2085 
2086 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2087 	sgroups[subsystem.id].mgmt_io_outstanding++;
2088 
2089 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.data);
2090 	poll_threads();
2091 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
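	/* Verify the defaults that nvmf_ctrlr_create() derives from the connect
	 * command and transport opts (features, CAP/VS/CC/CSTS register values).
	 */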
2092 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2093 	CU_ASSERT(ctrlr->subsys == &subsystem);
2094 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2095 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2096 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2097 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2098 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2099 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2100 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2101 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
2102 	CU_ASSERT(!memcmp(&ctrlr->hostid, hostid, 16));
2103 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2104 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2105 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2106 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == 1);
2107 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2108 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2109 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2110 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2111 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2112 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2113 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2114 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2115 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2116 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2117 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2118 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2119 
2120 	nvmf_ctrlr_destruct(ctrlr);
2121 	poll_threads();
2122 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2123 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2124 }
2125 
2126 int main(int argc, char **argv)
2127 {
2128 	CU_pSuite	suite = NULL;
2129 	unsigned int	num_failures;
2130 
2131 	CU_set_error_action(CUEA_ABORT);
2132 	CU_initialize_registry();
2133 
2134 	suite = CU_add_suite("nvmf", NULL, NULL);
2135 	CU_ADD_TEST(suite, test_get_log_page);
2136 	CU_ADD_TEST(suite, test_process_fabrics_cmd);
2137 	CU_ADD_TEST(suite, test_connect);
2138 	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
2139 	CU_ADD_TEST(suite, test_identify_ns);
2140 	CU_ADD_TEST(suite, test_reservation_write_exclusive);
2141 	CU_ADD_TEST(suite, test_reservation_exclusive_access);
2142 	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
2143 	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
2144 	CU_ADD_TEST(suite, test_reservation_notification_log_page);
2145 	CU_ADD_TEST(suite, test_get_dif_ctx);
2146 	CU_ADD_TEST(suite, test_set_get_features);
2147 	CU_ADD_TEST(suite, test_identify_ctrlr);
2148 	CU_ADD_TEST(suite, test_custom_admin_cmd);
2149 	CU_ADD_TEST(suite, test_fused_compare_and_write);
2150 	CU_ADD_TEST(suite, test_multi_async_event_reqs);
2151 	CU_ADD_TEST(suite, test_get_ana_log_page);
2152 	CU_ADD_TEST(suite, test_multi_async_events);
2153 	CU_ADD_TEST(suite, test_rae);
2154 	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
2155 
2156 	allocate_threads(1);
2157 	set_thread(0);
2158 
2159 	CU_basic_set_mode(CU_BRM_VERBOSE);
2160 	CU_basic_run_tests();
2161 	num_failures = CU_get_number_of_failures();
2162 	CU_cleanup_registry();
2163 
2164 	free_threads();
2165 
2166 	return num_failures;
2167 }
2168