/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"
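
/*
 * The code under test, nvmf/ctrlr.c, is #include'd directly rather than
 * linked so that its static functions are visible to the test cases, and
 * common/lib/ut_multithread.c supplies the poll_threads() helper used to
 * drive pending thread messages to completion.
 */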

SPDK_LOG_REGISTER_COMPONENT(nvmf)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

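/*
 * External dependencies of ctrlr.c are replaced with mocks generated by the
 * DEFINE_STUB()/DEFINE_STUB_V() macros from spdk_internal/mock.h: each stub
 * returns the default given as its last argument until a test overrides it
 * at runtime, e.g. as test_connect() does below:
 *
 *	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
 *	... exercise nvmf_ctrlr_cmd_connect() ...
 *	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
 */
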
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

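/*
 * Hand-written mock: report a namespace sized from the mock bdev's blockcnt,
 * with a single LBA format of 512-byte blocks and no metadata.
 */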
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

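/*
 * Hand-written mock: walk the subsystem's sparse ns array (indexed by
 * nsid - 1) and return the next allocated namespace, or NULL past max_nsid.
 */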
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = data;
	req.length = sizeof(data);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

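/*
 * Exercises nvmf_ctrlr_cmd_connect() for admin and I/O queues. Connect is
 * completed asynchronously on the subsystem's poll group thread, so each
 * case queues the request, runs poll_threads() and then inspects the
 * response capsule. The iattr/ipo values asserted below are the byte
 * offsets of the offending field in the CONNECT command (iattr == 0, e.g.
 * ipo 44 is SQSIZE) or its data (iattr == 1, e.g. ipo 512 is HOSTNQN), per
 * the NVMe over Fabrics specification.
 */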
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);

	/* Admin connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Admin connect to discovery controller with keep-alive-timeout == 0.
	 * A default keep-alive timeout should be applied in this case.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

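/*
 * An Identify NS ID Descriptor List (CNS 03h) payload is a sequence of
 * descriptors, each a 4-byte header (NIDT, NIDL, two reserved bytes)
 * followed by NIDL bytes of identifier, terminated by a zeroed header. The
 * buf[] offsets asserted below follow from that layout: an EUI64 descriptor
 * occupies bytes 0-11, so a following NGUID descriptor starts at byte 12.
 */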
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

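/*
 * Identify Namespace (CNS 00h) behavior being verified: an allocated but
 * inactive NSID returns a zero-filled structure with SUCCESS, while NSID 0,
 * an NSID beyond max_nsid, and the 0xFFFFFFFF broadcast NSID (namespace
 * management is not supported) all fail with Invalid Namespace or Format.
 */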
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

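/*
 * For the Temperature Threshold feature (FID 04h), CDW11 holds TMPTH in
 * bits 15:0, TMPSEL in bits 19:16 and THSEL in bits 21:20; the invalid
 * cases below build CDW11 with reserved selector values accordingly.
 */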
static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	struct spdk_nvmf_subsystem_listener listener = {};
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* TMPSEL (bits 19:16) = 9, a reserved selector */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* TMPSEL (bits 19:16) = 9, a reserved selector */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* THSEL (bits 21:20) = 3 is reserved */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

static void
init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	STAILQ_INIT(&ctrlr->async_events);
}

static void
cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_async_event_completion *event, *event_tmp;

	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
		free(event);
	}
}

static int
num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	int num = 0;
	struct spdk_nvmf_async_event_completion *event;

	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
		num++;
	}
	return num;
}

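/*
 * Reservation notifications are reported through the reservation
 * notification log page and signalled via an asynchronous event: when a log
 * page is queued while an AER is outstanding, the AER completes with the
 * event type, event info and log page identifier packed into CDW0, which is
 * what the event.raw decode below verifies.
 */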
1349 static void
1350 test_reservation_notification_log_page(void)
1351 {
1352 	struct spdk_nvmf_ctrlr ctrlr;
1353 	struct spdk_nvmf_qpair qpair;
1354 	struct spdk_nvmf_ns ns;
1355 	struct spdk_nvmf_request req;
1356 	union nvmf_h2c_msg cmd = {};
1357 	union nvmf_c2h_msg rsp = {};
1358 	union spdk_nvme_async_event_completion event = {};
1359 	struct spdk_nvme_reservation_notification_log logs[3];
1360 
1361 	memset(&ctrlr, 0, sizeof(ctrlr));
1362 	ctrlr.thread = spdk_get_thread();
1363 	TAILQ_INIT(&ctrlr.log_head);
1364 	init_pending_async_events(&ctrlr);
1365 	ns.nsid = 1;
1366 
1367 	/* Test Case: Mask all the reservation notifications */
1368 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1369 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1370 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1371 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1372 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1373 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1374 					  SPDK_NVME_RESERVATION_RELEASED);
1375 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1376 					  SPDK_NVME_RESERVATION_PREEMPTED);
1377 	poll_threads();
1378 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1379 
1380 	/* Test Case: Unmask all the reservation notifications,
1381 	 * 3 log pages are generated, and AER was triggered.
1382 	 */
1383 	ns.mask = 0;
1384 	ctrlr.num_avail_log_pages = 0;
1385 	req.cmd = &cmd;
1386 	req.rsp = &rsp;
1387 	ctrlr.aer_req[0] = &req;
1388 	ctrlr.nr_aer_reqs = 1;
1389 	req.qpair = &qpair;
1390 	TAILQ_INIT(&qpair.outstanding);
1391 	qpair.ctrlr = NULL;
1392 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1393 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1394 
1395 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1396 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1397 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1398 					  SPDK_NVME_RESERVATION_RELEASED);
1399 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1400 					  SPDK_NVME_RESERVATION_PREEMPTED);
1401 	poll_threads();
1402 	event.raw = rsp.nvme_cpl.cdw0;
1403 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1404 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1405 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1406 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1407 
1408 	/* Test Case: Get Log Page to clear the log pages */
1409 	nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs), 0);
1410 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1411 
1412 	cleanup_pending_async_events(&ctrlr);
1413 }
1414 
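/* spdk_nvmf_request_get_dif_ctx() should only produce a DIF context once all
 * of the gates below pass: dif_insert_or_strip enabled, qpair active, not a
 * fabrics command, an I/O queue (qid != 0), an nsid that maps to an existing
 * namespace, and an opcode such as WRITE that actually carries block data
 * (FLUSH does not). The test flips these conditions one at a time.
 */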
1415 static void
1416 test_get_dif_ctx(void)
1417 {
1418 	struct spdk_nvmf_subsystem subsystem = {};
1419 	struct spdk_nvmf_request req = {};
1420 	struct spdk_nvmf_qpair qpair = {};
1421 	struct spdk_nvmf_ctrlr ctrlr = {};
1422 	struct spdk_nvmf_ns ns = {};
1423 	struct spdk_nvmf_ns *_ns = NULL;
1424 	struct spdk_bdev bdev = {};
1425 	union nvmf_h2c_msg cmd = {};
1426 	struct spdk_dif_ctx dif_ctx = {};
1427 	bool ret;
1428 
1429 	ctrlr.subsys = &subsystem;
1430 
1431 	qpair.ctrlr = &ctrlr;
1432 
1433 	req.qpair = &qpair;
1434 	req.cmd = &cmd;
1435 
1436 	ns.bdev = &bdev;
1437 
1438 	ctrlr.dif_insert_or_strip = false;
1439 
1440 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1441 	CU_ASSERT(ret == false);
1442 
1443 	ctrlr.dif_insert_or_strip = true;
1444 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1445 
1446 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1447 	CU_ASSERT(ret == false);
1448 
1449 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1450 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1451 
1452 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1453 	CU_ASSERT(ret == false);
1454 
1455 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1456 
1457 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1458 	CU_ASSERT(ret == false);
1459 
1460 	qpair.qid = 1;
1461 
1462 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1463 	CU_ASSERT(ret == false);
1464 
1465 	cmd.nvme_cmd.nsid = 1;
1466 
1467 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1468 	CU_ASSERT(ret == false);
1469 
1470 	subsystem.max_nsid = 1;
1471 	subsystem.ns = &_ns;
1472 	subsystem.ns[0] = &ns;
1473 
1474 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1475 	CU_ASSERT(ret == false);
1476 
1477 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1478 
1479 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1480 	CU_ASSERT(ret == true);
1481 }
1482 
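/* ioccsz is reported in 16-byte units: the 64-byte SQE plus the in-capsule
 * data size, i.e. sizeof(struct spdk_nvme_cmd) / 16 + 4096 / 16 = 4 + 256 =
 * 260. The same value is expected for TCP, RDMA, and TCP with DIF
 * insert/strip at this revision.
 */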
1483 static void
1484 test_identify_ctrlr(void)
1485 {
1486 	struct spdk_nvmf_subsystem subsystem = {
1487 		.subtype = SPDK_NVMF_SUBTYPE_NVME
1488 	};
1489 	struct spdk_nvmf_transport_ops tops = {};
1490 	struct spdk_nvmf_transport transport = {
1491 		.ops = &tops,
1492 		.opts = {
1493 			.in_capsule_data_size = 4096,
1494 		},
1495 	};
1496 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
1497 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1498 	struct spdk_nvme_ctrlr_data cdata = {};
1499 	uint32_t expected_ioccsz;
1500 
1501 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1502 
1503 	/* Check ioccsz, TCP transport */
1504 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1505 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1506 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1507 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1508 
1509 	/* Check ioccsz, RDMA transport */
1510 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1511 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1512 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1513 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1514 
1515 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1516 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1517 	ctrlr.dif_insert_or_strip = true;
1518 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1519 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1520 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1521 }
1522 
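/* The handler below simply completes the command with success;
 * test_custom_admin_cmd() registers it for opcode 0xc1, which sits in the
 * admin vendor-specific opcode range (0xC0-0xFF), and verifies that
 * nvmf_ctrlr_process_admin_cmd() dispatches to it.
 */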
1523 static int
1524 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1525 {
1526 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1527 
1528 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1529 }
1530 
1531 static void
1532 test_custom_admin_cmd(void)
1533 {
1534 	struct spdk_nvmf_subsystem subsystem;
1535 	struct spdk_nvmf_qpair qpair;
1536 	struct spdk_nvmf_ctrlr ctrlr;
1537 	struct spdk_nvmf_request req;
1538 	struct spdk_nvmf_ns *ns_ptrs[1];
1539 	struct spdk_nvmf_ns ns;
1540 	union nvmf_h2c_msg cmd;
1541 	union nvmf_c2h_msg rsp;
1542 	struct spdk_bdev bdev;
1543 	uint8_t buf[4096];
1544 	int rc;
1545 
1546 	memset(&subsystem, 0, sizeof(subsystem));
1547 	ns_ptrs[0] = &ns;
1548 	subsystem.ns = ns_ptrs;
1549 	subsystem.max_nsid = 1;
1550 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1551 
1552 	memset(&ns, 0, sizeof(ns));
1553 	ns.opts.nsid = 1;
1554 	ns.bdev = &bdev;
1555 
1556 	memset(&qpair, 0, sizeof(qpair));
1557 	qpair.ctrlr = &ctrlr;
1558 
1559 	memset(&ctrlr, 0, sizeof(ctrlr));
1560 	ctrlr.subsys = &subsystem;
1561 	ctrlr.vcprop.cc.bits.en = 1;
1562 
1563 	memset(&req, 0, sizeof(req));
1564 	req.qpair = &qpair;
1565 	req.cmd = &cmd;
1566 	req.rsp = &rsp;
1567 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1568 	req.data = buf;
1569 	req.length = sizeof(buf);
1570 
1571 	memset(&cmd, 0, sizeof(cmd));
1572 	cmd.nvme_cmd.opc = 0xc1;
1573 	cmd.nvme_cmd.nsid = 0;
1574 	memset(&rsp, 0, sizeof(rsp));
1575 
1576 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1577 
1578 	/* Ensure that our handler is called */
1579 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1580 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1581 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1582 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1583 }
1584 
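/* A fused compare-and-write arrives as two commands: a COMPARE flagged
 * FUSE_FIRST followed by a WRITE flagged FUSE_SECOND. The target parks the
 * first half in qpair->first_fused_req until its partner arrives; any other
 * pairing must be rejected, as the cases below exercise.
 */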
1585 static void
1586 test_fused_compare_and_write(void)
1587 {
1588 	struct spdk_nvmf_request req = {};
1589 	struct spdk_nvmf_qpair qpair = {};
1590 	struct spdk_nvme_cmd cmd = {};
1591 	union nvmf_c2h_msg rsp = {};
1592 	struct spdk_nvmf_ctrlr ctrlr = {};
1593 	struct spdk_nvmf_subsystem subsystem = {};
1594 	struct spdk_nvmf_ns ns = {};
1595 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1596 	struct spdk_nvmf_subsystem_listener listener = {};
1597 	struct spdk_bdev bdev = {};
1598 
1599 	struct spdk_nvmf_poll_group group = {};
1600 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1601 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1602 
1603 	ns.bdev = &bdev;
1604 
1605 	subsystem.id = 0;
1606 	subsystem.max_nsid = 1;
1607 	subsys_ns[0] = &ns;
1608 	subsystem.ns = subsys_ns;
1609 
1610 	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1611 
1612 	/* Enable controller */
1613 	ctrlr.vcprop.cc.bits.en = 1;
1614 	ctrlr.subsys = &subsystem;
1615 	ctrlr.listener = &listener;
1616 
1617 	group.num_sgroups = 1;
1618 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1619 	sgroups.num_ns = 1;
1620 	sgroups.ns_info = &ns_info;
1621 	TAILQ_INIT(&sgroups.queued);
1622 	group.sgroups = &sgroups;
1623 	TAILQ_INIT(&qpair.outstanding);
1624 
1625 	qpair.ctrlr = &ctrlr;
1626 	qpair.group = &group;
1627 	qpair.qid = 1;
1628 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1629 
1630 	cmd.nsid = 1;
1631 
1632 	req.qpair = &qpair;
1633 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1634 	req.rsp = &rsp;
1635 
1636 	/* Valid sequence: FUSE_FIRST compare followed by FUSE_SECOND write */
1637 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1638 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1639 
1640 	spdk_nvmf_request_exec(&req);
1641 	CU_ASSERT(qpair.first_fused_req != NULL);
1642 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1643 
1644 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1645 	cmd.opc = SPDK_NVME_OPC_WRITE;
1646 
1647 	spdk_nvmf_request_exec(&req);
1648 	CU_ASSERT(qpair.first_fused_req == NULL);
1649 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1650 
1651 	/* Wrong sequence: FUSE_SECOND with no FUSE_FIRST outstanding */
1652 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1653 	cmd.opc = SPDK_NVME_OPC_WRITE;
1654 
1655 	spdk_nvmf_request_exec(&req);
1656 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1657 	CU_ASSERT(qpair.first_fused_req == NULL);
1658 
1659 	/* Write as FUSE_FIRST (Wrong op code) */
1660 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1661 	cmd.opc = SPDK_NVME_OPC_WRITE;
1662 
1663 	spdk_nvmf_request_exec(&req);
1664 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1665 	CU_ASSERT(qpair.first_fused_req == NULL);
1666 
1667 	/* Valid FUSE_FIRST compare, then compare as FUSE_SECOND (Wrong op code) */
1668 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1669 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1670 
1671 	spdk_nvmf_request_exec(&req);
1672 	CU_ASSERT(qpair.first_fused_req != NULL);
1673 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1674 
1675 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1676 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1677 
1678 	spdk_nvmf_request_exec(&req);
1679 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1680 	CU_ASSERT(qpair.first_fused_req == NULL);
1681 }
1682 
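/* A controller accepts at most NVMF_MAX_ASYNC_EVENTS outstanding AERs (4
 * here, hence the five requests). One request beyond the limit should fail
 * with ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, and aborting an entry in the
 * middle must compact the ctrlr->aer_req[] array.
 */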
1683 static void
1684 test_multi_async_event_reqs(void)
1685 {
1686 	struct spdk_nvmf_subsystem subsystem = {};
1687 	struct spdk_nvmf_qpair qpair = {};
1688 	struct spdk_nvmf_ctrlr ctrlr = {};
1689 	struct spdk_nvmf_request req[5] = {};
1690 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1691 	struct spdk_nvmf_ns ns = {};
1692 	union nvmf_h2c_msg cmd[5] = {};
1693 	union nvmf_c2h_msg rsp[5] = {};
1694 
1695 	struct spdk_nvmf_poll_group group = {};
1696 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1697 
1698 	int i;
1699 
1700 	ns_ptrs[0] = &ns;
1701 	subsystem.ns = ns_ptrs;
1702 	subsystem.max_nsid = 1;
1703 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1704 
1705 	ns.opts.nsid = 1;
1706 	group.sgroups = &sgroups;
1707 
1708 	qpair.ctrlr = &ctrlr;
1709 	qpair.group = &group;
1710 	TAILQ_INIT(&qpair.outstanding);
1711 
1712 	ctrlr.subsys = &subsystem;
1713 	ctrlr.vcprop.cc.bits.en = 1;
1714 
1715 	for (i = 0; i < 5; i++) {
1716 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1717 		cmd[i].nvme_cmd.nsid = 1;
1718 		cmd[i].nvme_cmd.cid = i;
1719 
1720 		req[i].qpair = &qpair;
1721 		req[i].cmd = &cmd[i];
1722 		req[i].rsp = &rsp[i];
1723 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1724 	}
1725 
1726 	/* The target can store up to NVMF_MAX_ASYNC_EVENTS reqs */
1727 	sgroups.io_outstanding = NVMF_MAX_ASYNC_EVENTS;
1728 	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
1729 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1730 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
1731 	}
1732 	CU_ASSERT(sgroups.io_outstanding == 0);
1733 
1734 	/* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
1735 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1736 	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
1737 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1738 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
1739 
1740 	/* Test that the aer_reqs array stays contiguous when a req in the middle is aborted */
1741 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
1742 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1743 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1744 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
1745 
1746 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
1747 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1748 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1749 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
1750 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
1751 
1752 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
1753 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
1754 }
1755 
1756 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
1757 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
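/* Each UT descriptor is the fixed spdk_nvme_ana_group_descriptor header plus
 * a single uint32_t NSID, and the log page is the spdk_nvme_ana_page header
 * followed by one such descriptor per namespace (three here).
 */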
1758 static void
1759 test_get_ana_log_page(void)
1760 {
1761 	struct spdk_nvmf_subsystem subsystem = {};
1762 	struct spdk_nvmf_ctrlr ctrlr = {};
1763 	struct spdk_nvmf_subsystem_listener listener = {};
1764 	struct spdk_nvmf_ns ns[3];
1765 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
1766 	uint64_t offset;
1767 	uint32_t length;
1768 	int i;
1769 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1770 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1771 	struct spdk_nvme_ana_page *ana_hdr;
1772 	char _ana_desc[UT_ANA_DESC_SIZE];
1773 	struct spdk_nvme_ana_group_descriptor *ana_desc;
1774 
1775 	subsystem.ns = ns_arr;
1776 	subsystem.max_nsid = 3;
1777 	ctrlr.subsys = &subsystem;
1778 	ctrlr.listener = &listener;
1779 	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1780 
1781 	for (i = 0; i < 3; i++) {
1782 		ns_arr[i]->nsid = i + 1;
1783 	}
1784 
1785 	/* create expected page */
1786 	ana_hdr = (void *)&expected_page[0];
1787 	ana_hdr->num_ana_group_desc = 3;
1788 	ana_hdr->change_count = 0;
1789 
1790 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
1791 	ana_desc = (void *)_ana_desc;
1792 	offset = sizeof(struct spdk_nvme_ana_page);
1793 
1794 	for (i = 0; i < 3; i++) {
1795 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
1796 		ana_desc->ana_group_id = ns_arr[i]->nsid;
1797 		ana_desc->num_of_nsid = 1;
1798 		ana_desc->change_count = 0;
1799 		ana_desc->ana_state = ctrlr.listener->ana_state;
1800 		ana_desc->nsid[0] = ns_arr[i]->nsid;
1801 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
1802 		offset += UT_ANA_DESC_SIZE;
1803 	}
1804 
1805 	/* read entire actual log page */
1806 	offset = 0;
1807 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
1808 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
1809 		nvmf_get_ana_log_page(&ctrlr, &actual_page[offset], offset, length, 0);
1810 		offset += length;
1811 	}
1812 
1813 	/* compare expected page and actual page */
1814 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1815 }
1816 
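/* With no AER outstanding, each notice lands on the pending event queue; the
 * first three AERs submitted afterwards should then complete immediately,
 * one queued event apiece, while the fourth stays outstanding.
 */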
1817 static void
1818 test_multi_async_events(void)
1819 {
1820 	struct spdk_nvmf_subsystem subsystem = {};
1821 	struct spdk_nvmf_qpair qpair = {};
1822 	struct spdk_nvmf_ctrlr ctrlr = {};
1823 	struct spdk_nvmf_request req[4] = {};
1824 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1825 	struct spdk_nvmf_ns ns = {};
1826 	union nvmf_h2c_msg cmd[4] = {};
1827 	union nvmf_c2h_msg rsp[4] = {};
1828 	union spdk_nvme_async_event_completion event = {};
1829 	struct spdk_nvmf_poll_group group = {};
1830 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1831 	int i;
1832 
1833 	ns_ptrs[0] = &ns;
1834 	subsystem.ns = ns_ptrs;
1835 	subsystem.max_nsid = 1;
1836 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1837 
1838 	ns.opts.nsid = 1;
1839 	group.sgroups = &sgroups;
1840 
1841 	qpair.ctrlr = &ctrlr;
1842 	qpair.group = &group;
1843 	TAILQ_INIT(&qpair.outstanding);
1844 
1845 	ctrlr.subsys = &subsystem;
1846 	ctrlr.vcprop.cc.bits.en = 1;
1847 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
1848 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
1849 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
1850 	init_pending_async_events(&ctrlr);
1851 
1852 	/* The target queues pending events when there is no outstanding AER request */
1853 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1854 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
1855 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
1856 
1857 	for (i = 0; i < 4; i++) {
1858 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1859 		cmd[i].nvme_cmd.nsid = 1;
1860 		cmd[i].nvme_cmd.cid = i;
1861 
1862 		req[i].qpair = &qpair;
1863 		req[i].cmd = &cmd[i];
1864 		req[i].rsp = &rsp[i];
1865 
1866 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1867 
1868 		sgroups.io_outstanding = 1;
1869 		if (i < 3) {
1870 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1871 			CU_ASSERT(sgroups.io_outstanding == 0);
1872 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
1873 		} else {
1874 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1875 			CU_ASSERT(sgroups.io_outstanding == 0);
1876 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
1877 		}
1878 	}
1879 
1880 	event.raw = rsp[0].nvme_cpl.cdw0;
1881 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
1882 	event.raw = rsp[1].nvme_cpl.cdw0;
1883 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
1884 	event.raw = rsp[2].nvme_cpl.cdw0;
1885 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
1886 
1887 	cleanup_pending_async_events(&ctrlr);
1888 }
1889 
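/* RAE (Retain Asynchronous Event) gating: once a changed-namespace event has
 * fired, further events of that type are suppressed until the host reads the
 * changed NS list log page with RAE cleared; reading it with RAE set must
 * leave the suppression in place.
 */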
1890 static void
1891 test_rae(void)
1892 {
1893 	struct spdk_nvmf_subsystem subsystem = {};
1894 	struct spdk_nvmf_qpair qpair = {};
1895 	struct spdk_nvmf_ctrlr ctrlr = {};
1896 	struct spdk_nvmf_request req[3] = {};
1897 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1898 	struct spdk_nvmf_ns ns = {};
1899 	union nvmf_h2c_msg cmd[3] = {};
1900 	union nvmf_c2h_msg rsp[3] = {};
1901 	union spdk_nvme_async_event_completion event = {};
1902 	struct spdk_nvmf_poll_group group = {};
1903 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1904 	int i;
1905 	char data[4096];
1906 
1907 	ns_ptrs[0] = &ns;
1908 	subsystem.ns = ns_ptrs;
1909 	subsystem.max_nsid = 1;
1910 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1911 
1912 	ns.opts.nsid = 1;
1913 	group.sgroups = &sgroups;
1914 
1915 	qpair.ctrlr = &ctrlr;
1916 	qpair.group = &group;
1917 	TAILQ_INIT(&qpair.outstanding);
1918 
1919 	ctrlr.subsys = &subsystem;
1920 	ctrlr.vcprop.cc.bits.en = 1;
1921 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
1922 	init_pending_async_events(&ctrlr);
1923 
1924 	/* The target queues pending events when there is no outstanding AER request */
1925 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1926 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1927 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1928 	/* only one event is queued until RAE is cleared */
1929 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
1930 
1931 	req[0].qpair = &qpair;
1932 	req[0].cmd = &cmd[0];
1933 	req[0].rsp = &rsp[0];
1934 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1935 	cmd[0].nvme_cmd.nsid = 1;
1936 	cmd[0].nvme_cmd.cid = 0;
1937 
1938 	for (i = 1; i < 3; i++) {
1939 		req[i].qpair = &qpair;
1940 		req[i].cmd = &cmd[i];
1941 		req[i].rsp = &rsp[i];
1942 		req[i].data = data;
1943 		req[i].length = sizeof(data);
1944 
1945 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
1946 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
1947 			SPDK_NVME_LOG_CHANGED_NS_LIST;
1948 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
1949 			spdk_nvme_bytes_to_numd(req[i].length);
1950 		cmd[i].nvme_cmd.cid = i;
1951 	}
1952 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
1953 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
1954 
1955 	/* consume the pending event */
1956 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
1957 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1958 	event.raw = rsp[0].nvme_cpl.cdw0;
1959 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
1960 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
1961 
1962 	/* get log with RAE set */
1963 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1964 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1965 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1966 
1967 	/* no new event will be generated until RAE is cleared */
1968 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1969 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
1970 
1971 	/* get log with RAE clear */
1972 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1973 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1974 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1975 
1976 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1977 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
1978 
1979 	cleanup_pending_async_events(&ctrlr);
1980 }
1981 
1982 int main(int argc, char **argv)
1983 {
1984 	CU_pSuite	suite = NULL;
1985 	unsigned int	num_failures;
1986 
1987 	CU_set_error_action(CUEA_ABORT);
1988 	CU_initialize_registry();
1989 
1990 	suite = CU_add_suite("nvmf", NULL, NULL);
1991 	CU_ADD_TEST(suite, test_get_log_page);
1992 	CU_ADD_TEST(suite, test_process_fabrics_cmd);
1993 	CU_ADD_TEST(suite, test_connect);
1994 	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
1995 	CU_ADD_TEST(suite, test_identify_ns);
1996 	CU_ADD_TEST(suite, test_reservation_write_exclusive);
1997 	CU_ADD_TEST(suite, test_reservation_exclusive_access);
1998 	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
1999 	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
2000 	CU_ADD_TEST(suite, test_reservation_notification_log_page);
2001 	CU_ADD_TEST(suite, test_get_dif_ctx);
2002 	CU_ADD_TEST(suite, test_set_get_features);
2003 	CU_ADD_TEST(suite, test_identify_ctrlr);
2004 	CU_ADD_TEST(suite, test_custom_admin_cmd);
2005 	CU_ADD_TEST(suite, test_fused_compare_and_write);
2006 	CU_ADD_TEST(suite, test_multi_async_event_reqs);
2007 	CU_ADD_TEST(suite, test_get_ana_log_page);
2008 	CU_ADD_TEST(suite, test_multi_async_events);
2009 	CU_ADD_TEST(suite, test_rae);
2010 
2011 	allocate_threads(1);
2012 	set_thread(0);
2013 
2014 	CU_basic_set_mode(CU_BRM_VERBOSE);
2015 	CU_basic_run_tests();
2016 	num_failures = CU_get_number_of_failures();
2017 	CU_cleanup_registry();
2018 
2019 	free_threads();
2020 
2021 	return num_failures;
2022 }
2023