xref: /spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c (revision 8e9bf1815df2455d994df622f5b43078193b4a84)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
6  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include "spdk/stdinc.h"
36 
37 #include "spdk_cunit.h"
38 #include "spdk_internal/mock.h"
39 #include "thread/thread_internal.h"
40 
41 #include "common/lib/ut_multithread.c"
42 #include "nvmf/ctrlr.c"
43 
44 SPDK_LOG_REGISTER_COMPONENT(nvmf)
45 
46 struct spdk_bdev {
47 	int ut_mock;
48 	uint64_t blockcnt;
49 	uint32_t blocklen;
50 };
51 
52 const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
53 const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";
54 
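/*
 * Sentinel bdev_io pointers: the zcopy stubs below assign one of these to
 * req->zcopy_bdev_io so tests can tell which opcode path was taken.
 */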
55 static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
56 static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
57 		0x8877665544332211UL;
58 static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;
59 
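/*
 * Stub out ctrlr.c's external dependencies (subsystem lookup, bdev I/O,
 * transport completion, etc.) so the controller logic under test runs in
 * isolation. Individual tests override return values with MOCK_SET().
 */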
60 DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
61 	    struct spdk_nvmf_subsystem *,
62 	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
63 	    NULL);
64 
65 DEFINE_STUB(spdk_nvmf_poll_group_create,
66 	    struct spdk_nvmf_poll_group *,
67 	    (struct spdk_nvmf_tgt *tgt),
68 	    NULL);
69 
70 DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
71 	    const char *,
72 	    (const struct spdk_nvmf_subsystem *subsystem),
73 	    subsystem_default_sn);
74 
75 DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
76 	    const char *,
77 	    (const struct spdk_nvmf_subsystem *subsystem),
78 	    subsystem_default_mn);
79 
80 DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
81 	    bool,
82 	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
83 	    true);
84 
85 DEFINE_STUB(nvmf_subsystem_add_ctrlr,
86 	    int,
87 	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
88 	    0);
89 
90 DEFINE_STUB(nvmf_subsystem_get_ctrlr,
91 	    struct spdk_nvmf_ctrlr *,
92 	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
93 	    NULL);
94 
95 DEFINE_STUB(nvmf_ctrlr_dsm_supported,
96 	    bool,
97 	    (struct spdk_nvmf_ctrlr *ctrlr),
98 	    false);
99 
100 DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
101 	    bool,
102 	    (struct spdk_nvmf_ctrlr *ctrlr),
103 	    false);
104 
105 DEFINE_STUB_V(nvmf_get_discovery_log_page,
106 	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
107 	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));
108 
109 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
110 	    int,
111 	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
112 	    0);
113 
114 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
115 	    bool,
116 	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
117 	    true);
118 
119 DEFINE_STUB(nvmf_subsystem_find_listener,
120 	    struct spdk_nvmf_subsystem_listener *,
121 	    (struct spdk_nvmf_subsystem *subsystem,
122 	     const struct spdk_nvme_transport_id *trid),
123 	    (void *)0x1);
124 
125 DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
126 	    int,
127 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
128 	     struct spdk_nvmf_request *req),
129 	    0);
130 
131 DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
132 	    int,
133 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
134 	     struct spdk_nvmf_request *req),
135 	    0);
136 
137 DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
138 	    int,
139 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
140 	     struct spdk_nvmf_request *req),
141 	    0);
142 
143 DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
144 	    int,
145 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
146 	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
147 	    0);
148 
149 DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
150 	    int,
151 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
152 	     struct spdk_nvmf_request *req),
153 	    0);
154 
155 DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
156 	    int,
157 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
158 	     struct spdk_nvmf_request *req),
159 	    0);
160 
161 DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
162 	    int,
163 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
164 	     struct spdk_nvmf_request *req),
165 	    0);
166 
167 DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
168 	    int,
169 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
170 	     struct spdk_nvmf_request *req),
171 	    0);
172 
173 DEFINE_STUB(nvmf_transport_req_complete,
174 	    int,
175 	    (struct spdk_nvmf_request *req),
176 	    0);
177 
178 DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
179 
180 DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
181 	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
182 	     struct spdk_dif_ctx *dif_ctx),
183 	    true);
184 
185 DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
186 	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
187 
188 DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
189 DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
190 
191 DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
192 		struct spdk_nvmf_ctrlr *ctrlr));
193 
194 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
195 	    int,
196 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
197 	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
198 	    0);
199 
200 DEFINE_STUB(nvmf_transport_req_free,
201 	    int,
202 	    (struct spdk_nvmf_request *req),
203 	    0);
204 
205 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
206 	    int,
207 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
208 	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
209 	    0);
210 
211 int
212 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
213 {
214 	return 0;
215 }
216 
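/*
 * Minimal identify-namespace helper for the tests: report the bdev's block
 * count and a single 512-byte LBA format.
 */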
217 void
218 nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
219 			    bool dif_insert_or_strip)
220 {
221 	uint64_t num_blocks;
222 
223 	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
224 	num_blocks = ns->bdev->blockcnt;
225 	nsdata->nsze = num_blocks;
226 	nsdata->ncap = num_blocks;
227 	nsdata->nuse = num_blocks;
228 	nsdata->nlbaf = 0;
229 	nsdata->flbas.format = 0;
230 	nsdata->lbaf[0].lbads = spdk_u32log2(512);
231 }
232 
233 struct spdk_nvmf_ns *
234 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
235 {
236 	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
237 	return subsystem->ns[0];
238 }
239 
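/*
 * Walk the subsystem's sparse namespace array (indexed by nsid - 1) and
 * return the next active namespace after prev_ns, or NULL at the end.
 */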
240 struct spdk_nvmf_ns *
241 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
242 				struct spdk_nvmf_ns *prev_ns)
243 {
244 	uint32_t nsid;
245 
246 	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
247 	nsid = prev_ns->nsid;
248 
249 	if (nsid >= subsystem->max_nsid) {
250 		return NULL;
251 	}
252 	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
253 		if (subsystem->ns[nsid - 1]) {
254 			return subsystem->ns[nsid - 1];
255 		}
256 	}
257 	return NULL;
258 }
259 
260 bool
261 nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
262 {
263 	return true;
264 }
265 
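/*
 * Zcopy start stub: validate the LBA range against the bdev size, record
 * the sentinel bdev_io matching the opcode, and complete the request
 * immediately.
 */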
266 int
267 nvmf_bdev_ctrlr_start_zcopy(struct spdk_bdev *bdev,
268 			    struct spdk_bdev_desc *desc,
269 			    struct spdk_io_channel *ch,
270 			    struct spdk_nvmf_request *req)
271 {
272 	uint64_t start_lba;
273 	uint64_t num_blocks;
274 
275 	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
276 	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;
277 
278 	if ((start_lba + num_blocks) > bdev->blockcnt) {
279 		return -ENXIO;
280 	}
281 
282 	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
283 		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
284 	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
285 		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
286 	} else {
287 		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
288 	}
289 
290 
291 	spdk_nvmf_request_complete(req);
292 	return 0;
293 }
294 
295 int
296 nvmf_bdev_ctrlr_end_zcopy(struct spdk_nvmf_request *req, bool commit)
297 {
298 	req->zcopy_bdev_io = NULL;
299 	spdk_nvmf_request_complete(req);
300 	return 0;
301 }
302 
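/*
 * Exercise nvmf_ctrlr_get_log_page() with a fully valid request, an invalid
 * log ID, a non-dword-aligned offset, and a missing data buffer.
 */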
303 static void
304 test_get_log_page(void)
305 {
306 	struct spdk_nvmf_subsystem subsystem = {};
307 	struct spdk_nvmf_request req = {};
308 	struct spdk_nvmf_qpair qpair = {};
309 	struct spdk_nvmf_ctrlr ctrlr = {};
310 	union nvmf_h2c_msg cmd = {};
311 	union nvmf_c2h_msg rsp = {};
312 	char data[4096];
313 
314 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
315 
316 	ctrlr.subsys = &subsystem;
317 
318 	qpair.ctrlr = &ctrlr;
319 
320 	req.qpair = &qpair;
321 	req.cmd = &cmd;
322 	req.rsp = &rsp;
323 	req.data = &data;
324 	req.length = sizeof(data);
325 
326 	/* Get Log Page - all valid */
327 	memset(&cmd, 0, sizeof(cmd));
328 	memset(&rsp, 0, sizeof(rsp));
329 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
330 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
331 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
332 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
333 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
334 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
335 
336 	/* Get Log Page with invalid log ID */
337 	memset(&cmd, 0, sizeof(cmd));
338 	memset(&rsp, 0, sizeof(rsp));
339 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
340 	cmd.nvme_cmd.cdw10 = 0;
341 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
342 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
343 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
344 
345 	/* Get Log Page with invalid offset (not dword aligned) */
346 	memset(&cmd, 0, sizeof(cmd));
347 	memset(&rsp, 0, sizeof(rsp));
348 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
349 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
350 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
351 	cmd.nvme_cmd.cdw12 = 2;
352 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
353 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
354 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
355 
356 	/* Get Log Page without data buffer */
357 	memset(&cmd, 0, sizeof(cmd));
358 	memset(&rsp, 0, sizeof(rsp));
359 	req.data = NULL;
360 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
361 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
362 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
363 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
364 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
365 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
366 	req.data = data;
367 }
368 
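/*
 * A fabrics command other than Connect on a qpair with no controller should
 * fail with a command sequence error.
 */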
369 static void
370 test_process_fabrics_cmd(void)
371 {
372 	struct	spdk_nvmf_request req = {};
373 	int	ret;
374 	struct	spdk_nvmf_qpair req_qpair = {};
375 	union	nvmf_h2c_msg  req_cmd = {};
376 	union	nvmf_c2h_msg   req_rsp = {};
377 
378 	req.qpair = &req_qpair;
379 	req.cmd  = &req_cmd;
380 	req.rsp  = &req_rsp;
381 	req.qpair->ctrlr = NULL;
382 
383 	/* No ctrlr and invalid command check */
384 	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
385 	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
386 	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
387 	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
388 }
389 
390 static bool
391 nvme_status_success(const struct spdk_nvme_status *status)
392 {
393 	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
394 }
395 
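/*
 * Walk nvmf_ctrlr_cmd_connect() through admin and I/O queue connects,
 * covering the main rejection paths (bad data length, recfmt, sqsize,
 * cntlid, duplicate qid, disabled controller, etc.) as well as the
 * successful cases.
 */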
396 static void
397 test_connect(void)
398 {
399 	struct spdk_nvmf_fabric_connect_data connect_data;
400 	struct spdk_nvmf_poll_group group;
401 	struct spdk_nvmf_subsystem_poll_group *sgroups;
402 	struct spdk_nvmf_transport transport;
403 	struct spdk_nvmf_transport_ops tops = {};
404 	struct spdk_nvmf_subsystem subsystem;
405 	struct spdk_nvmf_request req;
406 	struct spdk_nvmf_qpair admin_qpair;
407 	struct spdk_nvmf_qpair qpair;
408 	struct spdk_nvmf_qpair qpair2;
409 	struct spdk_nvmf_ctrlr ctrlr;
410 	struct spdk_nvmf_tgt tgt;
411 	union nvmf_h2c_msg cmd;
412 	union nvmf_c2h_msg rsp;
413 	const uint8_t hostid[16] = {
414 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
415 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
416 	};
417 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
418 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
419 	int rc;
420 
421 	memset(&group, 0, sizeof(group));
422 	group.thread = spdk_get_thread();
423 
424 	memset(&ctrlr, 0, sizeof(ctrlr));
425 	ctrlr.subsys = &subsystem;
426 	ctrlr.qpair_mask = spdk_bit_array_create(3);
427 	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
428 	ctrlr.vcprop.cc.bits.en = 1;
429 	ctrlr.vcprop.cc.bits.iosqes = 6;
430 	ctrlr.vcprop.cc.bits.iocqes = 4;
431 
432 	memset(&admin_qpair, 0, sizeof(admin_qpair));
433 	admin_qpair.group = &group;
434 	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
435 
436 	memset(&tgt, 0, sizeof(tgt));
437 	memset(&transport, 0, sizeof(transport));
438 	transport.ops = &tops;
439 	transport.opts.max_aq_depth = 32;
440 	transport.opts.max_queue_depth = 64;
441 	transport.opts.max_qpairs_per_ctrlr = 3;
442 	transport.tgt = &tgt;
443 
444 	memset(&qpair, 0, sizeof(qpair));
445 	qpair.transport = &transport;
446 	qpair.group = &group;
447 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
448 	TAILQ_INIT(&qpair.outstanding);
449 
450 	memset(&connect_data, 0, sizeof(connect_data));
451 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
452 	connect_data.cntlid = 0xFFFF;
453 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
454 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
455 
456 	memset(&subsystem, 0, sizeof(subsystem));
457 	subsystem.thread = spdk_get_thread();
458 	subsystem.id = 1;
459 	TAILQ_INIT(&subsystem.ctrlrs);
460 	subsystem.tgt = &tgt;
461 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
462 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
463 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
464 
465 	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
466 	group.sgroups = sgroups;
467 
468 	memset(&cmd, 0, sizeof(cmd));
469 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
470 	cmd.connect_cmd.cid = 1;
471 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
472 	cmd.connect_cmd.recfmt = 0;
473 	cmd.connect_cmd.qid = 0;
474 	cmd.connect_cmd.sqsize = 31;
475 	cmd.connect_cmd.cattr = 0;
476 	cmd.connect_cmd.kato = 120000;
477 
478 	memset(&req, 0, sizeof(req));
479 	req.qpair = &qpair;
480 	req.length = sizeof(connect_data);
481 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
482 	req.data = &connect_data;
483 	req.cmd = &cmd;
484 	req.rsp = &rsp;
485 
486 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
487 	MOCK_SET(spdk_nvmf_poll_group_create, &group);
488 
489 	/* Valid admin connect command */
490 	memset(&rsp, 0, sizeof(rsp));
491 	sgroups[subsystem.id].mgmt_io_outstanding++;
492 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
493 	rc = nvmf_ctrlr_cmd_connect(&req);
494 	poll_threads();
495 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
496 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
497 	CU_ASSERT(qpair.ctrlr != NULL);
498 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
499 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
500 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
501 	free(qpair.ctrlr);
502 	qpair.ctrlr = NULL;
503 
504 	/* Valid admin connect command with kato = 0 */
505 	cmd.connect_cmd.kato = 0;
506 	memset(&rsp, 0, sizeof(rsp));
507 	sgroups[subsystem.id].mgmt_io_outstanding++;
508 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
509 	rc = nvmf_ctrlr_cmd_connect(&req);
510 	poll_threads();
511 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
512 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
513 	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
514 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
515 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
516 	free(qpair.ctrlr);
517 	qpair.ctrlr = NULL;
518 	cmd.connect_cmd.kato = 120000;
519 
520 	/* Invalid data length */
521 	memset(&rsp, 0, sizeof(rsp));
522 	req.length = sizeof(connect_data) - 1;
523 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
524 	rc = nvmf_ctrlr_cmd_connect(&req);
525 	poll_threads();
526 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
527 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
528 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
529 	CU_ASSERT(qpair.ctrlr == NULL);
530 	req.length = sizeof(connect_data);
531 
532 	/* Invalid recfmt */
533 	memset(&rsp, 0, sizeof(rsp));
534 	cmd.connect_cmd.recfmt = 1234;
535 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
536 	rc = nvmf_ctrlr_cmd_connect(&req);
537 	poll_threads();
538 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
539 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
540 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
541 	CU_ASSERT(qpair.ctrlr == NULL);
542 	cmd.connect_cmd.recfmt = 0;
543 
544 	/* Subsystem not found */
545 	memset(&rsp, 0, sizeof(rsp));
546 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
547 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
548 	rc = nvmf_ctrlr_cmd_connect(&req);
549 	poll_threads();
550 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
551 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
552 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
553 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
554 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
555 	CU_ASSERT(qpair.ctrlr == NULL);
556 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
557 
558 	/* Unterminated hostnqn */
559 	memset(&rsp, 0, sizeof(rsp));
560 	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
561 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
562 	rc = nvmf_ctrlr_cmd_connect(&req);
563 	poll_threads();
564 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
565 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
566 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
567 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
568 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
569 	CU_ASSERT(qpair.ctrlr == NULL);
570 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
571 
572 	/* Host not allowed */
573 	memset(&rsp, 0, sizeof(rsp));
574 	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
575 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
576 	rc = nvmf_ctrlr_cmd_connect(&req);
577 	poll_threads();
578 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
579 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
580 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
581 	CU_ASSERT(qpair.ctrlr == NULL);
582 	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);
583 
584 	/* Invalid sqsize == 0 */
585 	memset(&rsp, 0, sizeof(rsp));
586 	cmd.connect_cmd.sqsize = 0;
587 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
588 	rc = nvmf_ctrlr_cmd_connect(&req);
589 	poll_threads();
590 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
591 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
592 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
593 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
594 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
595 	CU_ASSERT(qpair.ctrlr == NULL);
596 	cmd.connect_cmd.sqsize = 31;
597 
598 	/* Invalid admin sqsize > max_aq_depth */
599 	memset(&rsp, 0, sizeof(rsp));
600 	cmd.connect_cmd.sqsize = 32;
601 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
602 	rc = nvmf_ctrlr_cmd_connect(&req);
603 	poll_threads();
604 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
605 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
606 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
607 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
608 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
609 	CU_ASSERT(qpair.ctrlr == NULL);
610 	cmd.connect_cmd.sqsize = 31;
611 
612 	/* Invalid I/O sqsize > max_queue_depth */
613 	memset(&rsp, 0, sizeof(rsp));
614 	cmd.connect_cmd.qid = 1;
615 	cmd.connect_cmd.sqsize = 64;
616 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
617 	rc = nvmf_ctrlr_cmd_connect(&req);
618 	poll_threads();
619 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
620 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
621 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
622 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
623 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
624 	CU_ASSERT(qpair.ctrlr == NULL);
625 	cmd.connect_cmd.qid = 0;
626 	cmd.connect_cmd.sqsize = 31;
627 
628 	/* Invalid cntlid for admin queue */
629 	memset(&rsp, 0, sizeof(rsp));
630 	connect_data.cntlid = 0x1234;
631 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
632 	rc = nvmf_ctrlr_cmd_connect(&req);
633 	poll_threads();
634 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
635 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
636 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
637 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
638 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
639 	CU_ASSERT(qpair.ctrlr == NULL);
640 	connect_data.cntlid = 0xFFFF;
641 
642 	ctrlr.admin_qpair = &admin_qpair;
643 	ctrlr.subsys = &subsystem;
644 
645 	/* Valid I/O queue connect command */
646 	memset(&rsp, 0, sizeof(rsp));
647 	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
648 	cmd.connect_cmd.qid = 1;
649 	cmd.connect_cmd.sqsize = 63;
650 	sgroups[subsystem.id].mgmt_io_outstanding++;
651 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
652 	rc = nvmf_ctrlr_cmd_connect(&req);
653 	poll_threads();
654 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
655 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
656 	CU_ASSERT(qpair.ctrlr == &ctrlr);
657 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
658 	qpair.ctrlr = NULL;
659 	cmd.connect_cmd.sqsize = 31;
660 
661 	/* Non-existent controller */
662 	memset(&rsp, 0, sizeof(rsp));
663 	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
664 	sgroups[subsystem.id].mgmt_io_outstanding++;
665 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
666 	rc = nvmf_ctrlr_cmd_connect(&req);
667 	poll_threads();
668 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
669 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
670 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
671 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
672 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
673 	CU_ASSERT(qpair.ctrlr == NULL);
674 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
675 	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
676 
677 	/* I/O connect to discovery controller */
678 	memset(&rsp, 0, sizeof(rsp));
679 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
680 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
681 	sgroups[subsystem.id].mgmt_io_outstanding++;
682 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
683 	rc = nvmf_ctrlr_cmd_connect(&req);
684 	poll_threads();
685 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
686 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
687 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
688 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
689 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
690 	CU_ASSERT(qpair.ctrlr == NULL);
691 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
692 
693 	/* Admin queue connect to discovery controller with keep-alive-timeout != 0 */
694 	cmd.connect_cmd.qid = 0;
695 	cmd.connect_cmd.kato = 120000;
696 	memset(&rsp, 0, sizeof(rsp));
697 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
698 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
699 	sgroups[subsystem.id].mgmt_io_outstanding++;
700 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
701 	rc = nvmf_ctrlr_cmd_connect(&req);
702 	poll_threads();
703 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
704 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
705 	CU_ASSERT(qpair.ctrlr != NULL);
706 	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
707 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
708 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
709 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
710 	free(qpair.ctrlr);
711 	qpair.ctrlr = NULL;
712 
713 	/* Admin queue connect to discovery controller with keep-alive-timeout == 0.
714 	 * A default keep-alive timeout is applied, so the poller should still run.
715 	 */
716 	cmd.connect_cmd.kato = 0;
717 	memset(&rsp, 0, sizeof(rsp));
718 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
719 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
720 	sgroups[subsystem.id].mgmt_io_outstanding++;
721 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
722 	rc = nvmf_ctrlr_cmd_connect(&req);
723 	poll_threads();
724 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
725 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
726 	CU_ASSERT(qpair.ctrlr != NULL);
727 	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
728 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
729 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
730 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
731 	free(qpair.ctrlr);
732 	qpair.ctrlr = NULL;
733 	cmd.connect_cmd.qid = 1;
734 	cmd.connect_cmd.kato = 120000;
735 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
736 
737 	/* I/O connect to disabled controller */
738 	memset(&rsp, 0, sizeof(rsp));
739 	ctrlr.vcprop.cc.bits.en = 0;
740 	sgroups[subsystem.id].mgmt_io_outstanding++;
741 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
742 	rc = nvmf_ctrlr_cmd_connect(&req);
743 	poll_threads();
744 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
745 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
746 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
747 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
748 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
749 	CU_ASSERT(qpair.ctrlr == NULL);
750 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
751 	ctrlr.vcprop.cc.bits.en = 1;
752 
753 	/* I/O connect with invalid IOSQES */
754 	memset(&rsp, 0, sizeof(rsp));
755 	ctrlr.vcprop.cc.bits.iosqes = 3;
756 	sgroups[subsystem.id].mgmt_io_outstanding++;
757 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
758 	rc = nvmf_ctrlr_cmd_connect(&req);
759 	poll_threads();
760 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
761 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
762 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
763 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
764 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
765 	CU_ASSERT(qpair.ctrlr == NULL);
766 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
767 	ctrlr.vcprop.cc.bits.iosqes = 6;
768 
769 	/* I/O connect with invalid IOCQES */
770 	memset(&rsp, 0, sizeof(rsp));
771 	ctrlr.vcprop.cc.bits.iocqes = 3;
772 	sgroups[subsystem.id].mgmt_io_outstanding++;
773 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
774 	rc = nvmf_ctrlr_cmd_connect(&req);
775 	poll_threads();
776 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
777 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
778 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
779 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
780 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
781 	CU_ASSERT(qpair.ctrlr == NULL);
782 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
783 	ctrlr.vcprop.cc.bits.iocqes = 4;
784 
785 	/* I/O connect with too many existing qpairs */
786 	memset(&rsp, 0, sizeof(rsp));
787 	spdk_bit_array_set(ctrlr.qpair_mask, 0);
788 	spdk_bit_array_set(ctrlr.qpair_mask, 1);
789 	spdk_bit_array_set(ctrlr.qpair_mask, 2);
790 	sgroups[subsystem.id].mgmt_io_outstanding++;
791 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
792 	rc = nvmf_ctrlr_cmd_connect(&req);
793 	poll_threads();
794 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
795 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
796 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
797 	CU_ASSERT(qpair.ctrlr == NULL);
798 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
799 	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
800 	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
801 	spdk_bit_array_clear(ctrlr.qpair_mask, 2);
802 
803 	/* I/O connect with duplicate queue ID */
804 	memset(&rsp, 0, sizeof(rsp));
805 	memset(&qpair2, 0, sizeof(qpair2));
806 	qpair2.group = &group;
807 	qpair2.qid = 1;
808 	spdk_bit_array_set(ctrlr.qpair_mask, 1);
809 	cmd.connect_cmd.qid = 1;
810 	sgroups[subsystem.id].mgmt_io_outstanding++;
811 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
812 	rc = nvmf_ctrlr_cmd_connect(&req);
813 	poll_threads();
814 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
815 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
816 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
817 	CU_ASSERT(qpair.ctrlr == NULL);
818 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
819 
820 	/* I/O connect when admin qpair is being destroyed */
821 	admin_qpair.group = NULL;
822 	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
823 	memset(&rsp, 0, sizeof(rsp));
824 	sgroups[subsystem.id].mgmt_io_outstanding++;
825 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
826 	rc = nvmf_ctrlr_cmd_connect(&req);
827 	poll_threads();
828 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
829 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
830 	CU_ASSERT(qpair.ctrlr == NULL);
831 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
832 	admin_qpair.group = &group;
833 	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
834 
835 	/* Clean up globals */
836 	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
837 	MOCK_CLEAR(spdk_nvmf_poll_group_create);
838 
839 	spdk_bit_array_free(&ctrlr.qpair_mask);
840 	free(sgroups);
841 }
842 
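/*
 * Identify Namespace Identification Descriptor list (CNS 03h): check the
 * descriptors returned for combinations of EUI64, NGUID and UUID.
 */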
843 static void
844 test_get_ns_id_desc_list(void)
845 {
846 	struct spdk_nvmf_subsystem subsystem;
847 	struct spdk_nvmf_qpair qpair;
848 	struct spdk_nvmf_ctrlr ctrlr;
849 	struct spdk_nvmf_request req;
850 	struct spdk_nvmf_ns *ns_ptrs[1];
851 	struct spdk_nvmf_ns ns;
852 	union nvmf_h2c_msg cmd;
853 	union nvmf_c2h_msg rsp;
854 	struct spdk_bdev bdev;
855 	uint8_t buf[4096];
856 
857 	memset(&subsystem, 0, sizeof(subsystem));
858 	ns_ptrs[0] = &ns;
859 	subsystem.ns = ns_ptrs;
860 	subsystem.max_nsid = 1;
861 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
862 
863 	memset(&ns, 0, sizeof(ns));
864 	ns.opts.nsid = 1;
865 	ns.bdev = &bdev;
866 
867 	memset(&qpair, 0, sizeof(qpair));
868 	qpair.ctrlr = &ctrlr;
869 
870 	memset(&ctrlr, 0, sizeof(ctrlr));
871 	ctrlr.subsys = &subsystem;
872 	ctrlr.vcprop.cc.bits.en = 1;
873 
874 	memset(&req, 0, sizeof(req));
875 	req.qpair = &qpair;
876 	req.cmd = &cmd;
877 	req.rsp = &rsp;
878 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
879 	req.data = buf;
880 	req.length = sizeof(buf);
881 
882 	memset(&cmd, 0, sizeof(cmd));
883 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
884 	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
885 
886 	/* Invalid NSID */
887 	cmd.nvme_cmd.nsid = 0;
888 	memset(&rsp, 0, sizeof(rsp));
889 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
890 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
891 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
892 
893 	/* Valid NSID, but ns has no IDs defined */
894 	cmd.nvme_cmd.nsid = 1;
895 	memset(&rsp, 0, sizeof(rsp));
896 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
897 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
898 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
899 	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));
900 
901 	/* Valid NSID, only EUI64 defined */
902 	ns.opts.eui64[0] = 0x11;
903 	ns.opts.eui64[7] = 0xFF;
904 	memset(&rsp, 0, sizeof(rsp));
905 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
906 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
907 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
908 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
909 	CU_ASSERT(buf[1] == 8);
910 	CU_ASSERT(buf[4] == 0x11);
911 	CU_ASSERT(buf[11] == 0xFF);
912 	CU_ASSERT(buf[13] == 0);
913 
914 	/* Valid NSID, only NGUID defined */
915 	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
916 	ns.opts.nguid[0] = 0x22;
917 	ns.opts.nguid[15] = 0xEE;
918 	memset(&rsp, 0, sizeof(rsp));
919 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
920 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
921 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
922 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
923 	CU_ASSERT(buf[1] == 16);
924 	CU_ASSERT(buf[4] == 0x22);
925 	CU_ASSERT(buf[19] == 0xEE);
926 	CU_ASSERT(buf[21] == 0);
927 
928 	/* Valid NSID, both EUI64 and NGUID defined */
929 	ns.opts.eui64[0] = 0x11;
930 	ns.opts.eui64[7] = 0xFF;
931 	ns.opts.nguid[0] = 0x22;
932 	ns.opts.nguid[15] = 0xEE;
933 	memset(&rsp, 0, sizeof(rsp));
934 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
935 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
936 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
937 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
938 	CU_ASSERT(buf[1] == 8);
939 	CU_ASSERT(buf[4] == 0x11);
940 	CU_ASSERT(buf[11] == 0xFF);
941 	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
942 	CU_ASSERT(buf[13] == 16);
943 	CU_ASSERT(buf[16] == 0x22);
944 	CU_ASSERT(buf[31] == 0xEE);
945 	CU_ASSERT(buf[33] == 0);
946 
947 	/* Valid NSID, EUI64, NGUID, and UUID defined */
948 	ns.opts.eui64[0] = 0x11;
949 	ns.opts.eui64[7] = 0xFF;
950 	ns.opts.nguid[0] = 0x22;
951 	ns.opts.nguid[15] = 0xEE;
952 	ns.opts.uuid.u.raw[0] = 0x33;
953 	ns.opts.uuid.u.raw[15] = 0xDD;
954 	memset(&rsp, 0, sizeof(rsp));
955 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
956 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
957 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
958 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
959 	CU_ASSERT(buf[1] == 8);
960 	CU_ASSERT(buf[4] == 0x11);
961 	CU_ASSERT(buf[11] == 0xFF);
962 	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
963 	CU_ASSERT(buf[13] == 16);
964 	CU_ASSERT(buf[16] == 0x22);
965 	CU_ASSERT(buf[31] == 0xEE);
966 	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
967 	CU_ASSERT(buf[33] == 16);
968 	CU_ASSERT(buf[36] == 0x33);
969 	CU_ASSERT(buf[51] == 0xDD);
970 	CU_ASSERT(buf[53] == 0);
971 }
972 
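/*
 * Identify Namespace (CNS 00h): active, inactive and invalid NSIDs should
 * return either the bdev-backed namespace data or a zeroed structure with
 * the appropriate status code.
 */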
973 static void
974 test_identify_ns(void)
975 {
976 	struct spdk_nvmf_subsystem subsystem = {};
977 	struct spdk_nvmf_transport transport = {};
978 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
979 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
980 	struct spdk_nvme_cmd cmd = {};
981 	struct spdk_nvme_cpl rsp = {};
982 	struct spdk_nvme_ns_data nsdata = {};
983 	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
984 	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
985 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
986 
987 	subsystem.ns = ns_arr;
988 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
989 
990 	/* Invalid NSID 0 */
991 	cmd.nsid = 0;
992 	memset(&nsdata, 0, sizeof(nsdata));
993 	memset(&rsp, 0, sizeof(rsp));
994 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
995 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
996 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
997 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
998 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
999 
1000 	/* Valid NSID 1 */
1001 	cmd.nsid = 1;
1002 	memset(&nsdata, 0, sizeof(nsdata));
1003 	memset(&rsp, 0, sizeof(rsp));
1004 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1005 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1006 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1007 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1008 	CU_ASSERT(nsdata.nsze == 1234);
1009 
1010 	/* Valid but inactive NSID 2 */
1011 	cmd.nsid = 2;
1012 	memset(&nsdata, 0, sizeof(nsdata));
1013 	memset(&rsp, 0, sizeof(rsp));
1014 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1015 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1016 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1017 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1018 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1019 
1020 	/* Valid NSID 3 */
1021 	cmd.nsid = 3;
1022 	memset(&nsdata, 0, sizeof(nsdata));
1023 	memset(&rsp, 0, sizeof(rsp));
1024 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1025 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1026 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1027 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1028 	CU_ASSERT(nsdata.nsze == 5678);
1029 
1030 	/* Invalid NSID 4 */
1031 	cmd.nsid = 4;
1032 	memset(&nsdata, 0, sizeof(nsdata));
1033 	memset(&rsp, 0, sizeof(rsp));
1034 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1035 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1036 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1037 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1038 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1039 
1040 	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
1041 	cmd.nsid = 0xFFFFFFFF;
1042 	memset(&nsdata, 0, sizeof(nsdata));
1043 	memset(&rsp, 0, sizeof(rsp));
1044 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1045 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1046 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1047 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1048 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1049 }
1050 
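/*
 * Set/Get Features handling for reservation persistence, temperature
 * threshold and error recovery, including rejection of reserved field
 * values.
 */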
1051 static void
1052 test_set_get_features(void)
1053 {
1054 	struct spdk_nvmf_subsystem subsystem = {};
1055 	struct spdk_nvmf_qpair admin_qpair = {};
1056 	enum spdk_nvme_ana_state ana_state[3];
1057 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1058 	struct spdk_nvmf_ctrlr ctrlr = {
1059 		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
1060 	};
1061 	union nvmf_h2c_msg cmd = {};
1062 	union nvmf_c2h_msg rsp = {};
1063 	struct spdk_nvmf_ns ns[3];
1064 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
1065 	struct spdk_nvmf_request req;
1066 	int rc;
1067 
1068 	ns[0].anagrpid = 1;
1069 	ns[2].anagrpid = 3;
1070 	subsystem.ns = ns_arr;
1071 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
1072 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1073 	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1074 	admin_qpair.ctrlr = &ctrlr;
1075 	req.qpair = &admin_qpair;
1076 	cmd.nvme_cmd.nsid = 1;
1077 	req.cmd = &cmd;
1078 	req.rsp = &rsp;
1079 
1080 	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1081 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1082 	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
1083 	ns[0].ptpl_file = "testcfg";
1084 	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
1085 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1086 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1087 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
1088 	CU_ASSERT(ns[0].ptpl_activated == true);
1089 
1090 	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1091 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1092 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
1093 	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
1094 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1095 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1096 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1097 	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
1098 
1099 
1100 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1101 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1102 	cmd.nvme_cmd.cdw11 = 0x42;
1103 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1104 
1105 	rc = nvmf_ctrlr_get_features(&req);
1106 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1107 
1108 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1109 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1110 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1111 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1112 
1113 	rc = nvmf_ctrlr_get_features(&req);
1114 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1115 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1116 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1117 
1118 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1119 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1120 	cmd.nvme_cmd.cdw11 = 0x42;
1121 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1122 
1123 	rc = nvmf_ctrlr_set_features(&req);
1124 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1125 
1126 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1127 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1128 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1129 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1130 
1131 	rc = nvmf_ctrlr_set_features(&req);
1132 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1133 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1134 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1135 
1136 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
1137 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1138 	cmd.nvme_cmd.cdw11 = 0x42;
1139 	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
1140 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1141 
1142 	rc = nvmf_ctrlr_set_features(&req);
1143 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1144 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1145 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1146 
1147 
1148 	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
1149 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1150 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1151 
1152 	rc = nvmf_ctrlr_get_features(&req);
1153 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1154 
1155 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
1156 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1157 	cmd.nvme_cmd.cdw11 = 0x42;
1158 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
1159 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1160 
1161 	rc = nvmf_ctrlr_set_features(&req);
1162 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1163 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1164 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1165 
1166 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
1167 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1168 	cmd.nvme_cmd.cdw11 = 0x42;
1169 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
1170 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1171 
1172 	rc = nvmf_ctrlr_set_features(&req);
1173 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1174 }
1175 
1176 /*
1177  * Reservation Unit Test Configuration
1178  *       --------             --------    --------
1179  *      | Host A |           | Host B |  | Host C |
1180  *       --------             --------    --------
1181  *      /        \               |           |
1182  *  --------   --------       -------     -------
1183  * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
1184  *  --------   --------       -------     -------
1185  *    \           \              /           /
1186  *     \           \            /           /
1187  *      \           \          /           /
1188  *      --------------------------------------
1189  *     |            NAMESPACE 1               |
1190  *      --------------------------------------
1191  */
1192 
1193 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
1194 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
1195 
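/*
 * Build the configuration from the diagram above: hosts A (two controllers),
 * B and C are all registered on namespace 1 with the given reservation type.
 */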
1196 static void
1197 ut_reservation_init(enum spdk_nvme_reservation_type rtype)
1198 {
1199 	/* Host A has two controllers */
1200 	spdk_uuid_generate(&g_ctrlr1_A.hostid);
1201 	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
1202 
1203 	/* Host B has 1 controller */
1204 	spdk_uuid_generate(&g_ctrlr_B.hostid);
1205 
1206 	/* Host C has 1 controller */
1207 	spdk_uuid_generate(&g_ctrlr_C.hostid);
1208 
1209 	memset(&g_ns_info, 0, sizeof(g_ns_info));
1210 	g_ns_info.rtype = rtype;
1211 	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
1212 	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
1213 	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
1214 }
1215 
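/*
 * Write Exclusive: only the holder may write, while any registrant or
 * non-registrant may still read.
 */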
1216 static void
1217 test_reservation_write_exclusive(void)
1218 {
1219 	struct spdk_nvmf_request req = {};
1220 	union nvmf_h2c_msg cmd = {};
1221 	union nvmf_c2h_msg rsp = {};
1222 	int rc;
1223 
1224 	req.cmd = &cmd;
1225 	req.rsp = &rsp;
1226 
1227 	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
1228 	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1229 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1230 
1231 	/* Test Case: Issue a Read command from Host A and Host B */
1232 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1233 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1234 	SPDK_CU_ASSERT_FATAL(rc == 0);
1235 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1236 	SPDK_CU_ASSERT_FATAL(rc == 0);
1237 
1238 	/* Test Case: Issue a DSM Write command from Host A and Host B */
1239 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1240 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1241 	SPDK_CU_ASSERT_FATAL(rc == 0);
1242 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1243 	SPDK_CU_ASSERT_FATAL(rc < 0);
1244 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1245 
1246 	/* Test Case: Issue a Write command from Host C */
1247 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1248 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1249 	SPDK_CU_ASSERT_FATAL(rc < 0);
1250 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1251 
1252 	/* Test Case: Issue a Read command from Host B */
1253 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1254 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1255 	SPDK_CU_ASSERT_FATAL(rc == 0);
1256 
1257 	/* Unregister Host C */
1258 	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
1259 
1260 	/* Test Case: Read and Write commands from non-registrant Host C */
1261 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1262 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1263 	SPDK_CU_ASSERT_FATAL(rc < 0);
1264 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1265 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1266 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1267 	SPDK_CU_ASSERT_FATAL(rc == 0);
1268 }
1269 
1270 static void
1271 test_reservation_exclusive_access(void)
1272 {
1273 	struct spdk_nvmf_request req = {};
1274 	union nvmf_h2c_msg cmd = {};
1275 	union nvmf_c2h_msg rsp = {};
1276 	int rc;
1277 
1278 	req.cmd = &cmd;
1279 	req.rsp = &rsp;
1280 
1281 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1282 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1283 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1284 
1285 	/* Test Case: Issue a Read command from Host B */
1286 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1287 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1288 	SPDK_CU_ASSERT_FATAL(rc < 0);
1289 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1290 
1291 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1292 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1293 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1294 	SPDK_CU_ASSERT_FATAL(rc == 0);
1295 }
1296 
1297 static void
1298 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1299 {
1300 	struct spdk_nvmf_request req = {};
1301 	union nvmf_h2c_msg cmd = {};
1302 	union nvmf_c2h_msg rsp = {};
1303 	int rc;
1304 
1305 	req.cmd = &cmd;
1306 	req.rsp = &rsp;
1307 
1308 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1309 	ut_reservation_init(rtype);
1310 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1311 
1312 	/* Test Case: Issue a Read command from Host A and Host C */
1313 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1314 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1315 	SPDK_CU_ASSERT_FATAL(rc == 0);
1316 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1317 	SPDK_CU_ASSERT_FATAL(rc == 0);
1318 
1319 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1320 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1321 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1322 	SPDK_CU_ASSERT_FATAL(rc == 0);
1323 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1324 	SPDK_CU_ASSERT_FATAL(rc == 0);
1325 
1326 	/* Unregister Host C */
1327 	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
1328 
1329 	/* Test Case: Read and Write commands from non-registrant Host C */
1330 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1331 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1332 	SPDK_CU_ASSERT_FATAL(rc == 0);
1333 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1334 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1335 	SPDK_CU_ASSERT_FATAL(rc < 0);
1336 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1337 }
1338 
1339 static void
1340 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1341 {
1342 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1343 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1344 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1345 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1346 }
1347 
1348 static void
1349 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1350 {
1351 	struct spdk_nvmf_request req = {};
1352 	union nvmf_h2c_msg cmd = {};
1353 	union nvmf_c2h_msg rsp = {};
1354 	int rc;
1355 
1356 	req.cmd = &cmd;
1357 	req.rsp = &rsp;
1358 
1359 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1360 	ut_reservation_init(rtype);
1361 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1362 
1363 	/* Test Case: Issue a Write command from Host B */
1364 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1365 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1366 	SPDK_CU_ASSERT_FATAL(rc == 0);
1367 
1368 	/* Unregister Host B */
1369 	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));
1370 
1371 	/* Test Case: Issue a Read command from Host B */
1372 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1373 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1374 	SPDK_CU_ASSERT_FATAL(rc < 0);
1375 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1376 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1377 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1378 	SPDK_CU_ASSERT_FATAL(rc < 0);
1379 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1380 }
1381 
1382 static void
1383 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1384 {
1385 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1386 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1387 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1388 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1389 }
1390 
1391 static void
1392 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1393 {
1394 	STAILQ_INIT(&ctrlr->async_events);
1395 }
1396 
1397 static void
1398 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1399 {
1400 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1401 
1402 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1403 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1404 		free(event);
1405 	}
1406 }
1407 
1408 static int
1409 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1410 {
1411 	int num = 0;
1412 	struct spdk_nvmf_async_event_completion *event;
1413 
1414 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1415 		num++;
1416 	}
1417 	return num;
1418 }
1419 
1420 static void
1421 test_reservation_notification_log_page(void)
1422 {
1423 	struct spdk_nvmf_ctrlr ctrlr;
1424 	struct spdk_nvmf_qpair qpair;
1425 	struct spdk_nvmf_ns ns;
1426 	struct spdk_nvmf_request req = {};
1427 	union nvmf_h2c_msg cmd = {};
1428 	union nvmf_c2h_msg rsp = {};
1429 	union spdk_nvme_async_event_completion event = {};
1430 	struct spdk_nvme_reservation_notification_log logs[3];
1431 	struct iovec iov;
1432 
1433 	memset(&ctrlr, 0, sizeof(ctrlr));
1434 	ctrlr.thread = spdk_get_thread();
1435 	TAILQ_INIT(&ctrlr.log_head);
1436 	init_pending_async_events(&ctrlr);
1437 	ns.nsid = 1;
1438 
1439 	/* Test Case: Mask all the reservation notifications */
1440 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1441 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1442 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1443 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1444 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1445 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1446 					  SPDK_NVME_RESERVATION_RELEASED);
1447 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1448 					  SPDK_NVME_RESERVATION_PREEMPTED);
1449 	poll_threads();
1450 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1451 
	/* Test Case: Unmask all the reservation notifications;
	 * 3 log pages are generated and an AER is triggered.
	 */
1455 	ns.mask = 0;
1456 	ctrlr.num_avail_log_pages = 0;
1457 	req.cmd = &cmd;
1458 	req.rsp = &rsp;
1459 	ctrlr.aer_req[0] = &req;
1460 	ctrlr.nr_aer_reqs = 1;
1461 	req.qpair = &qpair;
1462 	TAILQ_INIT(&qpair.outstanding);
1463 	qpair.ctrlr = NULL;
1464 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1465 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1466 
1467 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1468 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1469 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1470 					  SPDK_NVME_RESERVATION_RELEASED);
1471 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1472 					  SPDK_NVME_RESERVATION_PREEMPTED);
1473 	poll_threads();
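	/* The AER completes with the event packed into CDW0 of its completion:
	 * the async event type, the event info, and the log page identifier of
	 * the reservation notification log, which the assertions below decode.
	 */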
1474 	event.raw = rsp.nvme_cpl.cdw0;
1475 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1476 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1477 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1478 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1479 
1480 	/* Test Case: Get Log Page to clear the log pages */
1481 	iov.iov_base = &logs[0];
1482 	iov.iov_len = sizeof(logs);
1483 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1484 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1485 
1486 	cleanup_pending_async_events(&ctrlr);
1487 }
1488 
1489 static void
1490 test_get_dif_ctx(void)
1491 {
1492 	struct spdk_nvmf_subsystem subsystem = {};
1493 	struct spdk_nvmf_request req = {};
1494 	struct spdk_nvmf_qpair qpair = {};
1495 	struct spdk_nvmf_ctrlr ctrlr = {};
1496 	struct spdk_nvmf_ns ns = {};
1497 	struct spdk_nvmf_ns *_ns = NULL;
1498 	struct spdk_bdev bdev = {};
1499 	union nvmf_h2c_msg cmd = {};
1500 	struct spdk_dif_ctx dif_ctx = {};
1501 	bool ret;
1502 
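	/* Each step below satisfies one more precondition; spdk_nvmf_request_get_dif_ctx()
	 * keeps returning false until the request is a WRITE on an active I/O queue pair
	 * targeting a valid namespace while DIF insert/strip is enabled on the controller.
	 */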
1503 	ctrlr.subsys = &subsystem;
1504 
1505 	qpair.ctrlr = &ctrlr;
1506 
1507 	req.qpair = &qpair;
1508 	req.cmd = &cmd;
1509 
1510 	ns.bdev = &bdev;
1511 
1512 	ctrlr.dif_insert_or_strip = false;
1513 
1514 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1515 	CU_ASSERT(ret == false);
1516 
1517 	ctrlr.dif_insert_or_strip = true;
1518 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1519 
1520 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1521 	CU_ASSERT(ret == false);
1522 
1523 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1524 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1525 
1526 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1527 	CU_ASSERT(ret == false);
1528 
1529 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1530 
1531 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1532 	CU_ASSERT(ret == false);
1533 
1534 	qpair.qid = 1;
1535 
1536 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1537 	CU_ASSERT(ret == false);
1538 
1539 	cmd.nvme_cmd.nsid = 1;
1540 
1541 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1542 	CU_ASSERT(ret == false);
1543 
1544 	subsystem.max_nsid = 1;
1545 	subsystem.ns = &_ns;
1546 	subsystem.ns[0] = &ns;
1547 
1548 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1549 	CU_ASSERT(ret == false);
1550 
1551 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1552 
1553 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1554 	CU_ASSERT(ret == true);
1555 }
1556 
1557 static void
1558 test_identify_ctrlr(void)
1559 {
1560 	struct spdk_nvmf_tgt tgt = {};
1561 	struct spdk_nvmf_subsystem subsystem = {
1562 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1563 		.tgt = &tgt,
1564 	};
1565 	struct spdk_nvmf_transport_ops tops = {};
1566 	struct spdk_nvmf_transport transport = {
1567 		.ops = &tops,
1568 		.opts = {
1569 			.in_capsule_data_size = 4096,
1570 		},
1571 	};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
1573 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1574 	struct spdk_nvme_ctrlr_data cdata = {};
1575 	uint32_t expected_ioccsz;
1576 
1577 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1578 
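	/* IOCCSZ is reported in 16-byte units: the 64-byte SQE contributes 4 and the
	 * 4096-byte in-capsule data buffer contributes 256, so each case below expects 260.
	 */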
1579 	/* Check ioccsz, TCP transport */
1580 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1581 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1582 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1583 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1584 
1585 	/* Check ioccsz, RDMA transport */
1586 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1587 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1588 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1589 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1590 
1591 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1592 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1593 	ctrlr.dif_insert_or_strip = true;
1594 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1595 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1596 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1597 }
1598 
1599 static int
1600 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1601 {
1602 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1603 
1604 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
1606 
1607 static void
1608 test_custom_admin_cmd(void)
1609 {
1610 	struct spdk_nvmf_subsystem subsystem;
1611 	struct spdk_nvmf_qpair qpair;
1612 	struct spdk_nvmf_ctrlr ctrlr;
1613 	struct spdk_nvmf_request req;
1614 	struct spdk_nvmf_ns *ns_ptrs[1];
1615 	struct spdk_nvmf_ns ns;
1616 	union nvmf_h2c_msg cmd;
1617 	union nvmf_c2h_msg rsp;
1618 	struct spdk_bdev bdev;
1619 	uint8_t buf[4096];
1620 	int rc;
1621 
1622 	memset(&subsystem, 0, sizeof(subsystem));
1623 	ns_ptrs[0] = &ns;
1624 	subsystem.ns = ns_ptrs;
1625 	subsystem.max_nsid = 1;
1626 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1627 
1628 	memset(&ns, 0, sizeof(ns));
1629 	ns.opts.nsid = 1;
1630 	ns.bdev = &bdev;
1631 
1632 	memset(&qpair, 0, sizeof(qpair));
1633 	qpair.ctrlr = &ctrlr;
1634 
1635 	memset(&ctrlr, 0, sizeof(ctrlr));
1636 	ctrlr.subsys = &subsystem;
1637 	ctrlr.vcprop.cc.bits.en = 1;
1638 
1639 	memset(&req, 0, sizeof(req));
1640 	req.qpair = &qpair;
1641 	req.cmd = &cmd;
1642 	req.rsp = &rsp;
1643 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1644 	req.data = buf;
1645 	req.length = sizeof(buf);
1646 
1647 	memset(&cmd, 0, sizeof(cmd));
1648 	cmd.nvme_cmd.opc = 0xc1;
1649 	cmd.nvme_cmd.nsid = 0;
1650 	memset(&rsp, 0, sizeof(rsp));
1651 
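	/* Register a handler for the vendor-specific admin opcode 0xc1 so that
	 * nvmf_ctrlr_process_admin_cmd() dispatches to it instead of failing the command.
	 */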
1652 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1653 
	/* Ensure that our handler is called */
1655 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1656 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1657 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1658 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1659 }
1660 
1661 static void
1662 test_fused_compare_and_write(void)
1663 {
1664 	struct spdk_nvmf_request req = {};
1665 	struct spdk_nvmf_qpair qpair = {};
1666 	struct spdk_nvme_cmd cmd = {};
1667 	union nvmf_c2h_msg rsp = {};
1668 	struct spdk_nvmf_ctrlr ctrlr = {};
1669 	struct spdk_nvmf_subsystem subsystem = {};
1670 	struct spdk_nvmf_ns ns = {};
1671 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1672 	enum spdk_nvme_ana_state ana_state[1];
1673 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1674 	struct spdk_bdev bdev = {};
1675 
1676 	struct spdk_nvmf_poll_group group = {};
1677 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1678 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1679 	struct spdk_io_channel io_ch = {};
1680 
1681 	ns.bdev = &bdev;
1682 	ns.anagrpid = 1;
1683 
1684 	subsystem.id = 0;
1685 	subsystem.max_nsid = 1;
1686 	subsys_ns[0] = &ns;
1687 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1688 
1689 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1690 
1691 	/* Enable controller */
1692 	ctrlr.vcprop.cc.bits.en = 1;
1693 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1694 	ctrlr.listener = &listener;
1695 
1696 	group.num_sgroups = 1;
1697 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1698 	sgroups.num_ns = 1;
1699 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1700 	ns_info.channel = &io_ch;
1701 	sgroups.ns_info = &ns_info;
1702 	TAILQ_INIT(&sgroups.queued);
1703 	group.sgroups = &sgroups;
1704 	TAILQ_INIT(&qpair.outstanding);
1705 
1706 	qpair.ctrlr = &ctrlr;
1707 	qpair.group = &group;
1708 	qpair.qid = 1;
1709 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1710 
1711 	cmd.nsid = 1;
1712 
1713 	req.qpair = &qpair;
1714 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1715 	req.rsp = &rsp;
1716 
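	/* A fused compare-and-write arrives as two commands: a FUSE_FIRST Compare that is
	 * held in qpair.first_fused_req and a FUSE_SECOND Write that releases it. The cases
	 * below cover the valid pair as well as out-of-order and wrong-opcode sequences.
	 */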
1717 	/* SUCCESS/SUCCESS */
1718 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1719 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1720 
1721 	spdk_nvmf_request_exec(&req);
1722 	CU_ASSERT(qpair.first_fused_req != NULL);
1723 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1724 
1725 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1726 	cmd.opc = SPDK_NVME_OPC_WRITE;
1727 
1728 	spdk_nvmf_request_exec(&req);
1729 	CU_ASSERT(qpair.first_fused_req == NULL);
1730 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1731 
1732 	/* Wrong sequence */
1733 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1734 	cmd.opc = SPDK_NVME_OPC_WRITE;
1735 
1736 	spdk_nvmf_request_exec(&req);
1737 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1738 	CU_ASSERT(qpair.first_fused_req == NULL);
1739 
1740 	/* Write as FUSE_FIRST (Wrong op code) */
1741 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1742 	cmd.opc = SPDK_NVME_OPC_WRITE;
1743 
1744 	spdk_nvmf_request_exec(&req);
1745 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1746 	CU_ASSERT(qpair.first_fused_req == NULL);
1747 
	/* Compare as FUSE_SECOND (Wrong op code): a valid FUSE_FIRST Compare
	 * followed by a FUSE_SECOND Compare, which must fail.
	 */
1749 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1750 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1751 
1752 	spdk_nvmf_request_exec(&req);
1753 	CU_ASSERT(qpair.first_fused_req != NULL);
1754 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1755 
1756 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1757 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1758 
1759 	spdk_nvmf_request_exec(&req);
1760 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1761 	CU_ASSERT(qpair.first_fused_req == NULL);
1762 }
1763 
1764 static void
1765 test_multi_async_event_reqs(void)
1766 {
1767 	struct spdk_nvmf_subsystem subsystem = {};
1768 	struct spdk_nvmf_qpair qpair = {};
1769 	struct spdk_nvmf_ctrlr ctrlr = {};
1770 	struct spdk_nvmf_request req[5] = {};
1771 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1772 	struct spdk_nvmf_ns ns = {};
1773 	union nvmf_h2c_msg cmd[5] = {};
1774 	union nvmf_c2h_msg rsp[5] = {};
1775 
1776 	struct spdk_nvmf_poll_group group = {};
1777 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1778 
1779 	int i;
1780 
1781 	ns_ptrs[0] = &ns;
1782 	subsystem.ns = ns_ptrs;
1783 	subsystem.max_nsid = 1;
1784 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1785 
1786 	ns.opts.nsid = 1;
1787 	group.sgroups = &sgroups;
1788 
1789 	qpair.ctrlr = &ctrlr;
1790 	qpair.group = &group;
1791 	TAILQ_INIT(&qpair.outstanding);
1792 
1793 	ctrlr.subsys = &subsystem;
1794 	ctrlr.vcprop.cc.bits.en = 1;
1795 
1796 	for (i = 0; i < 5; i++) {
1797 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1798 		cmd[i].nvme_cmd.nsid = 1;
1799 		cmd[i].nvme_cmd.cid = i;
1800 
1801 		req[i].qpair = &qpair;
1802 		req[i].cmd = &cmd[i];
1803 		req[i].rsp = &rsp[i];
1804 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1805 	}
1806 
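	/* Five requests are prepared: NVMF_MAX_ASYNC_EVENTS AERs (presumably 4 here, since
	 * req[4] is used for the over-limit case) can be outstanding at once, plus one extra
	 * to exercise the limit.
	 */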
1807 	/* Target can store NVMF_MAX_ASYNC_EVENTS reqs */
1808 	sgroups.mgmt_io_outstanding = NVMF_MAX_ASYNC_EVENTS;
1809 	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
1810 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1811 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
1812 	}
1813 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1814 
	/* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
1820 
	/* Test that the aer_req array stays contiguous when a request in the middle is aborted */
1822 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
1823 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1824 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1825 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
1826 
1827 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
1828 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1829 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1830 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
1831 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
1832 
1833 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
1834 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
1835 }
1836 
1837 static void
1838 test_get_ana_log_page_one_ns_per_anagrp(void)
1839 {
1840 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
1841 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
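	/* The ANA log page is a header followed by one descriptor per ANA group, each
	 * descriptor trailed by its NSID list; with one namespace per group a descriptor
	 * is the fixed struct plus a single uint32_t NSID, hence the sizes above.
	 */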
1842 	uint32_t ana_group[3];
1843 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
1844 	struct spdk_nvmf_ctrlr ctrlr = {};
1845 	enum spdk_nvme_ana_state ana_state[3];
1846 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1847 	struct spdk_nvmf_ns ns[3];
1848 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
1849 	uint64_t offset;
1850 	uint32_t length;
1851 	int i;
1852 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1853 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1854 	struct iovec iov, iovs[2];
1855 	struct spdk_nvme_ana_page *ana_hdr;
1856 	char _ana_desc[UT_ANA_DESC_SIZE];
1857 	struct spdk_nvme_ana_group_descriptor *ana_desc;
1858 
1859 	subsystem.ns = ns_arr;
1860 	subsystem.max_nsid = 3;
1861 	for (i = 0; i < 3; i++) {
1862 		subsystem.ana_group[i] = 1;
1863 	}
1864 	ctrlr.subsys = &subsystem;
1865 	ctrlr.listener = &listener;
1866 
1867 	for (i = 0; i < 3; i++) {
1868 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1869 	}
1870 
1871 	for (i = 0; i < 3; i++) {
1872 		ns_arr[i]->nsid = i + 1;
1873 		ns_arr[i]->anagrpid = i + 1;
1874 	}
1875 
1876 	/* create expected page */
1877 	ana_hdr = (void *)&expected_page[0];
1878 	ana_hdr->num_ana_group_desc = 3;
1879 	ana_hdr->change_count = 0;
1880 
	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
1882 	ana_desc = (void *)_ana_desc;
1883 	offset = sizeof(struct spdk_nvme_ana_page);
1884 
1885 	for (i = 0; i < 3; i++) {
1886 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
1887 		ana_desc->ana_group_id = ns_arr[i]->nsid;
1888 		ana_desc->num_of_nsid = 1;
1889 		ana_desc->change_count = 0;
1890 		ana_desc->ana_state = ctrlr.listener->ana_state[i];
1891 		ana_desc->nsid[0] = ns_arr[i]->nsid;
1892 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
1893 		offset += UT_ANA_DESC_SIZE;
1894 	}
1895 
1896 	/* read entire actual log page */
1897 	offset = 0;
1898 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
1899 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
1900 		iov.iov_base = &actual_page[offset];
1901 		iov.iov_len = length;
1902 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
1903 		offset += length;
1904 	}
1905 
1906 	/* compare expected page and actual page */
1907 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1908 
1909 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
1910 	offset = 0;
1911 	iovs[0].iov_base = &actual_page[offset];
1912 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
1913 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
1914 	iovs[1].iov_base = &actual_page[offset];
1915 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
1916 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
1917 
1918 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1919 
1920 #undef UT_ANA_DESC_SIZE
1921 #undef UT_ANA_LOG_PAGE_SIZE
1922 }
1923 
1924 static void
1925 test_get_ana_log_page_multi_ns_per_anagrp(void)
1926 {
1927 #define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
1928 				 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
1929 				 sizeof(uint32_t) * 5)
1930 	struct spdk_nvmf_ns ns[5];
1931 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
1932 	uint32_t ana_group[5] = {0};
1933 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
1934 	enum spdk_nvme_ana_state ana_state[5];
1935 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
1936 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
1937 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1938 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1939 	struct iovec iov, iovs[2];
1940 	struct spdk_nvme_ana_page *ana_hdr;
1941 	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
1942 	struct spdk_nvme_ana_group_descriptor *ana_desc;
1943 	uint64_t offset;
1944 	uint32_t length;
1945 	int i;
1946 
1947 	subsystem.max_nsid = 5;
1948 	subsystem.ana_group[1] = 3;
1949 	subsystem.ana_group[2] = 2;
1950 	for (i = 0; i < 5; i++) {
1951 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1952 	}
1953 
1954 	for (i = 0; i < 5; i++) {
1955 		ns_arr[i]->nsid = i + 1;
1956 	}
1957 	ns_arr[0]->anagrpid = 2;
1958 	ns_arr[1]->anagrpid = 3;
1959 	ns_arr[2]->anagrpid = 2;
1960 	ns_arr[3]->anagrpid = 3;
1961 	ns_arr[4]->anagrpid = 2;
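	/* ANA group 2 therefore holds NSIDs 1, 3 and 5, while group 3 holds NSIDs 2 and 4;
	 * the expected page built below mirrors that mapping.
	 */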
1962 
1963 	/* create expected page */
1964 	ana_hdr = (void *)&expected_page[0];
1965 	ana_hdr->num_ana_group_desc = 2;
1966 	ana_hdr->change_count = 0;
1967 
	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
1969 	ana_desc = (void *)_ana_desc;
1970 	offset = sizeof(struct spdk_nvme_ana_page);
1971 
1972 	memset(_ana_desc, 0, sizeof(_ana_desc));
1973 	ana_desc->ana_group_id = 2;
1974 	ana_desc->num_of_nsid = 3;
1975 	ana_desc->change_count = 0;
1976 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1977 	ana_desc->nsid[0] = 1;
1978 	ana_desc->nsid[1] = 3;
1979 	ana_desc->nsid[2] = 5;
1980 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
1981 	       sizeof(uint32_t) * 3);
1982 	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;
1983 
1984 	memset(_ana_desc, 0, sizeof(_ana_desc));
1985 	ana_desc->ana_group_id = 3;
1986 	ana_desc->num_of_nsid = 2;
1987 	ana_desc->change_count = 0;
1988 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1989 	ana_desc->nsid[0] = 2;
1990 	ana_desc->nsid[1] = 4;
1991 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
1992 	       sizeof(uint32_t) * 2);
1993 
1994 	/* read entire actual log page, and compare expected page and actual page. */
1995 	offset = 0;
1996 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
1997 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
1998 		iov.iov_base = &actual_page[offset];
1999 		iov.iov_len = length;
2000 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2001 		offset += length;
2002 	}
2003 
2004 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2005 
2006 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2007 	offset = 0;
2008 	iovs[0].iov_base = &actual_page[offset];
2009 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2010 	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2011 	iovs[1].iov_base = &actual_page[offset];
2012 	iovs[1].iov_len = sizeof(uint32_t) * 5;
2013 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2014 
2015 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2016 
2017 #undef UT_ANA_LOG_PAGE_SIZE
}

2019 static void
2020 test_multi_async_events(void)
2021 {
2022 	struct spdk_nvmf_subsystem subsystem = {};
2023 	struct spdk_nvmf_qpair qpair = {};
2024 	struct spdk_nvmf_ctrlr ctrlr = {};
2025 	struct spdk_nvmf_request req[4] = {};
2026 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2027 	struct spdk_nvmf_ns ns = {};
2028 	union nvmf_h2c_msg cmd[4] = {};
2029 	union nvmf_c2h_msg rsp[4] = {};
2030 	union spdk_nvme_async_event_completion event = {};
2031 	struct spdk_nvmf_poll_group group = {};
2032 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2033 	int i;
2034 
2035 	ns_ptrs[0] = &ns;
2036 	subsystem.ns = ns_ptrs;
2037 	subsystem.max_nsid = 1;
2038 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2039 
2040 	ns.opts.nsid = 1;
2041 	group.sgroups = &sgroups;
2042 
2043 	qpair.ctrlr = &ctrlr;
2044 	qpair.group = &group;
2045 	TAILQ_INIT(&qpair.outstanding);
2046 
2047 	ctrlr.subsys = &subsystem;
2048 	ctrlr.vcprop.cc.bits.en = 1;
2049 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2050 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2051 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2052 	init_pending_async_events(&ctrlr);
2053 
	/* The target queues pending events when there is no outstanding AER request */
2055 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2056 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2057 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2058 
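	/* The three queued events complete the first three AER commands immediately;
	 * the fourth AER finds the queue empty and stays outstanding.
	 */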
2059 	for (i = 0; i < 4; i++) {
2060 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2061 		cmd[i].nvme_cmd.nsid = 1;
2062 		cmd[i].nvme_cmd.cid = i;
2063 
2064 		req[i].qpair = &qpair;
2065 		req[i].cmd = &cmd[i];
2066 		req[i].rsp = &rsp[i];
2067 
2068 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2069 
2070 		sgroups.mgmt_io_outstanding = 1;
2071 		if (i < 3) {
2072 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2073 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2074 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2075 		} else {
2076 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2077 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2078 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2079 		}
2080 	}
2081 
2082 	event.raw = rsp[0].nvme_cpl.cdw0;
2083 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2084 	event.raw = rsp[1].nvme_cpl.cdw0;
2085 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2086 	event.raw = rsp[2].nvme_cpl.cdw0;
2087 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2088 
2089 	cleanup_pending_async_events(&ctrlr);
2090 }
2091 
2092 static void
2093 test_rae(void)
2094 {
2095 	struct spdk_nvmf_subsystem subsystem = {};
2096 	struct spdk_nvmf_qpair qpair = {};
2097 	struct spdk_nvmf_ctrlr ctrlr = {};
2098 	struct spdk_nvmf_request req[3] = {};
2099 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2100 	struct spdk_nvmf_ns ns = {};
2101 	union nvmf_h2c_msg cmd[3] = {};
2102 	union nvmf_c2h_msg rsp[3] = {};
2103 	union spdk_nvme_async_event_completion event = {};
2104 	struct spdk_nvmf_poll_group group = {};
2105 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2106 	int i;
2107 	char data[4096];
2108 
2109 	ns_ptrs[0] = &ns;
2110 	subsystem.ns = ns_ptrs;
2111 	subsystem.max_nsid = 1;
2112 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2113 
2114 	ns.opts.nsid = 1;
2115 	group.sgroups = &sgroups;
2116 
2117 	qpair.ctrlr = &ctrlr;
2118 	qpair.group = &group;
2119 	TAILQ_INIT(&qpair.outstanding);
2120 
2121 	ctrlr.subsys = &subsystem;
2122 	ctrlr.vcprop.cc.bits.en = 1;
2123 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2124 	init_pending_async_events(&ctrlr);
2125 
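	/* RAE (Retain Asynchronous Event): a Get Log Page with RAE set does not clear the
	 * changed namespace list event, so further notices are suppressed; only a read with
	 * RAE cleared allows a new event to be queued, as the cases below verify.
	 */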
	/* The target queues pending events when there is no outstanding AER request */
2127 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2128 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2129 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	/* Only one event is queued until RAE is cleared */
2131 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2132 
2133 	req[0].qpair = &qpair;
2134 	req[0].cmd = &cmd[0];
2135 	req[0].rsp = &rsp[0];
2136 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2137 	cmd[0].nvme_cmd.nsid = 1;
2138 	cmd[0].nvme_cmd.cid = 0;
2139 
2140 	for (i = 1; i < 3; i++) {
2141 		req[i].qpair = &qpair;
2142 		req[i].cmd = &cmd[i];
2143 		req[i].rsp = &rsp[i];
2144 		req[i].data = &data;
2145 		req[i].length = sizeof(data);
2146 
2147 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2148 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2149 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2150 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2151 			spdk_nvme_bytes_to_numd(req[i].length);
2152 		cmd[i].nvme_cmd.cid = i;
2153 	}
2154 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2155 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
2156 
2157 	/* consume the pending event */
2158 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2159 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2160 	event.raw = rsp[0].nvme_cpl.cdw0;
2161 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2162 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2163 
2164 	/* get log with RAE set */
2165 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2166 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2167 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2168 
	/* No new event is generated until RAE is cleared */
2170 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2171 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2172 
2173 	/* get log with RAE clear */
2174 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2175 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2176 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2177 
2178 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2179 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2180 
2181 	cleanup_pending_async_events(&ctrlr);
2182 }
2183 
2184 static void
2185 test_nvmf_ctrlr_create_destruct(void)
2186 {
2187 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2188 	struct spdk_nvmf_poll_group group = {};
2189 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2190 	struct spdk_nvmf_transport transport = {};
2191 	struct spdk_nvmf_transport_ops tops = {};
2192 	struct spdk_nvmf_subsystem subsystem = {};
2193 	struct spdk_nvmf_request req = {};
2194 	struct spdk_nvmf_qpair qpair = {};
2195 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2196 	struct spdk_nvmf_tgt tgt = {};
2197 	union nvmf_h2c_msg cmd = {};
2198 	union nvmf_c2h_msg rsp = {};
2199 	const uint8_t hostid[16] = {
2200 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2201 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2202 	};
2203 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2204 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2205 
2206 	group.thread = spdk_get_thread();
2207 	transport.ops = &tops;
2208 	transport.opts.max_aq_depth = 32;
2209 	transport.opts.max_queue_depth = 64;
2210 	transport.opts.max_qpairs_per_ctrlr = 3;
2211 	transport.opts.dif_insert_or_strip = true;
2212 	transport.tgt = &tgt;
2213 	qpair.transport = &transport;
2214 	qpair.group = &group;
2215 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2216 	TAILQ_INIT(&qpair.outstanding);
2217 
2218 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2219 	connect_data.cntlid = 0xFFFF;
2220 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2221 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2222 
2223 	subsystem.thread = spdk_get_thread();
2224 	subsystem.id = 1;
2225 	TAILQ_INIT(&subsystem.ctrlrs);
2226 	subsystem.tgt = &tgt;
2227 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2228 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2229 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2230 
2231 	group.sgroups = sgroups;
2232 
2233 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2234 	cmd.connect_cmd.cid = 1;
2235 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2236 	cmd.connect_cmd.recfmt = 0;
2237 	cmd.connect_cmd.qid = 0;
2238 	cmd.connect_cmd.sqsize = 31;
2239 	cmd.connect_cmd.cattr = 0;
2240 	cmd.connect_cmd.kato = 120000;
2241 
2242 	req.qpair = &qpair;
2243 	req.length = sizeof(connect_data);
2244 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2245 	req.data = &connect_data;
2246 	req.cmd = &cmd;
2247 	req.rsp = &rsp;
2248 
2249 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2250 	sgroups[subsystem.id].mgmt_io_outstanding++;
2251 
2252 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.data);
2253 	poll_threads();
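	/* The assertions below check the defaults negotiated from the connect command and
	 * the transport opts above: e.g. kato comes from the connect command,
	 * dif_insert_or_strip from the transport opts, and mqes (63) matches
	 * max_queue_depth - 1.
	 */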
2254 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2255 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2256 	CU_ASSERT(ctrlr->subsys == &subsystem);
2257 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2258 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2259 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2260 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2261 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2262 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2263 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2264 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
	CU_ASSERT(!memcmp(&ctrlr->hostid, hostid, 16));
2266 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2267 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2268 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2269 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == 1);
2270 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2271 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2272 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2273 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2274 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2275 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2276 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2277 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2278 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2279 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2280 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2281 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2282 
2283 	ctrlr->in_destruct = true;
2284 	nvmf_ctrlr_destruct(ctrlr);
2285 	poll_threads();
2286 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2287 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2288 }
2289 
2290 static void
2291 test_nvmf_ctrlr_use_zcopy(void)
2292 {
2293 	struct spdk_nvmf_subsystem subsystem = {};
2294 	struct spdk_nvmf_request req = {};
2295 	struct spdk_nvmf_qpair qpair = {};
2296 	struct spdk_nvmf_ctrlr ctrlr = {};
2297 	union nvmf_h2c_msg cmd = {};
2298 	struct spdk_nvmf_ns ns = {};
2299 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2300 	struct spdk_bdev bdev = {};
2301 	struct spdk_nvmf_poll_group group = {};
2302 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2303 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2304 	struct spdk_io_channel io_ch = {};
2305 	int opc;
2306 
2307 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2308 	ns.bdev = &bdev;
2309 
2310 	subsystem.id = 0;
2311 	subsystem.max_nsid = 1;
2312 	subsys_ns[0] = &ns;
2313 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2314 
2315 	ctrlr.subsys = &subsystem;
2316 
2317 	qpair.ctrlr = &ctrlr;
2318 	qpair.group = &group;
2319 	qpair.qid = 1;
2320 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2321 
2322 	group.thread = spdk_get_thread();
2323 	group.num_sgroups = 1;
2324 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2325 	sgroups.num_ns = 1;
2326 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2327 	ns_info.channel = &io_ch;
2328 	sgroups.ns_info = &ns_info;
2329 	TAILQ_INIT(&sgroups.queued);
2330 	group.sgroups = &sgroups;
2331 	TAILQ_INIT(&qpair.outstanding);
2332 
2333 	req.qpair = &qpair;
2334 	req.cmd = &cmd;
2335 
2336 	/* Admin queue */
2337 	qpair.qid = 0;
2338 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2339 	qpair.qid = 1;
2340 
2341 	/* Invalid Opcodes */
2342 	for (opc = 0; opc <= 255; opc++) {
2343 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2344 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2345 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2346 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2347 		}
2348 	}
2349 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2350 
2351 	/* Fused WRITE */
2352 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2353 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2354 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2355 
	/* NSID with no backing namespace/bdev */
2357 	cmd.nvme_cmd.nsid = 4;
2358 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2359 	cmd.nvme_cmd.nsid = 1;
2360 
2361 	/* ZCOPY Not supported */
2362 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2363 
2364 	/* Success */
2365 	ns.zcopy = true;
2366 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2367 }
2368 
2369 static void
2370 test_spdk_nvmf_request_zcopy_start(void)
2371 {
2372 	struct spdk_nvmf_request req = {};
2373 	struct spdk_nvmf_qpair qpair = {};
2374 	struct spdk_nvme_cmd cmd = {};
2375 	union nvmf_c2h_msg rsp = {};
2376 	struct spdk_nvmf_ctrlr ctrlr = {};
2377 	struct spdk_nvmf_subsystem subsystem = {};
2378 	struct spdk_nvmf_ns ns = {};
2379 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2380 	enum spdk_nvme_ana_state ana_state[1];
2381 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2382 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2383 
2384 	struct spdk_nvmf_poll_group group = {};
2385 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2386 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2387 	struct spdk_io_channel io_ch = {};
2388 
2389 	ns.bdev = &bdev;
2390 	ns.zcopy = true;
2391 	ns.anagrpid = 1;
2392 
2393 	subsystem.id = 0;
2394 	subsystem.max_nsid = 1;
2395 	subsys_ns[0] = &ns;
2396 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2397 
2398 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2399 
2400 	/* Enable controller */
2401 	ctrlr.vcprop.cc.bits.en = 1;
2402 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2403 	ctrlr.listener = &listener;
2404 
2405 	group.thread = spdk_get_thread();
2406 	group.num_sgroups = 1;
2407 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2408 	sgroups.num_ns = 1;
2409 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2410 	ns_info.channel = &io_ch;
2411 	sgroups.ns_info = &ns_info;
2412 	TAILQ_INIT(&sgroups.queued);
2413 	group.sgroups = &sgroups;
2414 	TAILQ_INIT(&qpair.outstanding);
2415 
2416 	qpair.ctrlr = &ctrlr;
2417 	qpair.group = &group;
2418 	qpair.qid = 1;
2419 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2420 
2421 	cmd.nsid = 1;
2422 
2423 	req.qpair = &qpair;
2424 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2425 	req.rsp = &rsp;
2426 	cmd.opc = SPDK_NVME_OPC_READ;
2427 
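	/* Each failure case below clears one precondition, confirms that
	 * spdk_nvmf_request_zcopy_start() rejects the request and resets the phase to
	 * NVMF_ZCOPY_PHASE_NONE, then restores the precondition for the next case.
	 */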
2428 	/* Fail because no controller */
2429 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2430 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2431 	qpair.ctrlr = NULL;
2432 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2433 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2434 	qpair.ctrlr = &ctrlr;
2435 
2436 	/* Fail because no sgroup */
2437 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2438 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2439 	group.sgroups = NULL;
2440 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2441 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2442 	group.sgroups = &sgroups;
2443 
2444 	/* Fail because bad NSID */
2445 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2446 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2447 	cmd.nsid = 0;
2448 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2449 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2450 	cmd.nsid = 1;
2451 
2452 	/* Fail because bad Channel */
2453 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2454 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2455 	ns_info.channel = NULL;
2456 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2457 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2458 	ns_info.channel = &io_ch;
2459 
2460 	/* Fail because NSID is not active */
2461 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2462 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2463 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2464 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2465 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2466 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2467 
2468 	/* Fail because QPair is not active */
2469 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2470 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2471 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2472 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2473 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2474 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2475 
2476 	/* Fail because nvmf_bdev_ctrlr_start_zcopy fails */
2477 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2478 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2479 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2480 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2481 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
2482 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2483 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2484 	cmd.cdw10 = 0;
2485 	cmd.cdw12 = 0;
2486 
2487 	/* Success */
2488 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2489 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2490 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
2491 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2492 }
2493 
2494 static void
2495 test_zcopy_read(void)
2496 {
2497 	struct spdk_nvmf_request req = {};
2498 	struct spdk_nvmf_qpair qpair = {};
2499 	struct spdk_nvme_cmd cmd = {};
2500 	union nvmf_c2h_msg rsp = {};
2501 	struct spdk_nvmf_ctrlr ctrlr = {};
2502 	struct spdk_nvmf_subsystem subsystem = {};
2503 	struct spdk_nvmf_ns ns = {};
2504 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2505 	enum spdk_nvme_ana_state ana_state[1];
2506 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2507 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2508 
2509 	struct spdk_nvmf_poll_group group = {};
2510 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2511 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2512 	struct spdk_io_channel io_ch = {};
2513 
2514 	ns.bdev = &bdev;
2515 	ns.zcopy = true;
2516 	ns.anagrpid = 1;
2517 
2518 	subsystem.id = 0;
2519 	subsystem.max_nsid = 1;
2520 	subsys_ns[0] = &ns;
2521 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2522 
2523 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2524 
2525 	/* Enable controller */
2526 	ctrlr.vcprop.cc.bits.en = 1;
2527 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2528 	ctrlr.listener = &listener;
2529 
2530 	group.thread = spdk_get_thread();
2531 	group.num_sgroups = 1;
2532 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2533 	sgroups.num_ns = 1;
2534 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2535 	ns_info.channel = &io_ch;
2536 	sgroups.ns_info = &ns_info;
2537 	TAILQ_INIT(&sgroups.queued);
2538 	group.sgroups = &sgroups;
2539 	TAILQ_INIT(&qpair.outstanding);
2540 
2541 	qpair.ctrlr = &ctrlr;
2542 	qpair.group = &group;
2543 	qpair.qid = 1;
2544 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2545 
2546 	cmd.nsid = 1;
2547 
2548 	req.qpair = &qpair;
2549 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2550 	req.rsp = &rsp;
2551 	cmd.opc = SPDK_NVME_OPC_READ;
2552 
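	/* Walk a zcopy read through its phases: start (the stubbed bdev layer hands back
	 * zcopy_start_bdev_io_read), execute, and end, checking the outstanding counters
	 * at every step.
	 */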
2553 	/* Prepare for zcopy */
2554 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2555 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2556 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2557 	CU_ASSERT(ns_info.io_outstanding == 0);
2558 
2559 	/* Perform the zcopy start */
2560 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
2561 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2562 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2563 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2564 	CU_ASSERT(ns_info.io_outstanding == 1);
2565 
2566 	/* Execute the request */
2567 	spdk_nvmf_request_exec(&req);
2568 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2569 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2570 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2571 	CU_ASSERT(ns_info.io_outstanding == 1);
2572 
2573 	/* Perform the zcopy end */
2574 	spdk_nvmf_request_zcopy_end(&req, false);
2575 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2576 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2577 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2578 	CU_ASSERT(ns_info.io_outstanding == 0);
2579 }
2580 
2581 static void
2582 test_zcopy_write(void)
2583 {
2584 	struct spdk_nvmf_request req = {};
2585 	struct spdk_nvmf_qpair qpair = {};
2586 	struct spdk_nvme_cmd cmd = {};
2587 	union nvmf_c2h_msg rsp = {};
2588 	struct spdk_nvmf_ctrlr ctrlr = {};
2589 	struct spdk_nvmf_subsystem subsystem = {};
2590 	struct spdk_nvmf_ns ns = {};
2591 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2592 	enum spdk_nvme_ana_state ana_state[1];
2593 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2594 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2595 
2596 	struct spdk_nvmf_poll_group group = {};
2597 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2598 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2599 	struct spdk_io_channel io_ch = {};
2600 
2601 	ns.bdev = &bdev;
2602 	ns.zcopy = true;
2603 	ns.anagrpid = 1;
2604 
2605 	subsystem.id = 0;
2606 	subsystem.max_nsid = 1;
2607 	subsys_ns[0] = &ns;
2608 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2609 
2610 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2611 
2612 	/* Enable controller */
2613 	ctrlr.vcprop.cc.bits.en = 1;
2614 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2615 	ctrlr.listener = &listener;
2616 
2617 	group.thread = spdk_get_thread();
2618 	group.num_sgroups = 1;
2619 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2620 	sgroups.num_ns = 1;
2621 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2622 	ns_info.channel = &io_ch;
2623 	sgroups.ns_info = &ns_info;
2624 	TAILQ_INIT(&sgroups.queued);
2625 	group.sgroups = &sgroups;
2626 	TAILQ_INIT(&qpair.outstanding);
2627 
2628 	qpair.ctrlr = &ctrlr;
2629 	qpair.group = &group;
2630 	qpair.qid = 1;
2631 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2632 
2633 	cmd.nsid = 1;
2634 
2635 	req.qpair = &qpair;
2636 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2637 	req.rsp = &rsp;
2638 	cmd.opc = SPDK_NVME_OPC_WRITE;
2639 
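	/* Same flow as test_zcopy_read, but for a WRITE: the stubbed bdev layer returns
	 * zcopy_start_bdev_io_write and the zcopy end is performed with commit set to true.
	 */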
2640 	/* Prepare for zcopy */
2641 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2642 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2643 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2644 	CU_ASSERT(ns_info.io_outstanding == 0);
2645 
2646 	/* Perform the zcopy start */
2647 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
2648 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2649 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2650 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2651 	CU_ASSERT(ns_info.io_outstanding == 1);
2652 
2653 	/* Execute the request */
2654 	spdk_nvmf_request_exec(&req);
2655 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2656 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2657 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2658 	CU_ASSERT(ns_info.io_outstanding == 1);
2659 
2660 	/* Perform the zcopy end */
2661 	spdk_nvmf_request_zcopy_end(&req, true);
2662 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2663 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2664 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2665 	CU_ASSERT(ns_info.io_outstanding == 0);
2666 }
2667 
2668 static void
2669 test_nvmf_property_set(void)
2670 {
2671 	int rc;
2672 	struct spdk_nvmf_request req = {};
2673 	struct spdk_nvmf_qpair qpair = {};
2674 	struct spdk_nvmf_ctrlr ctrlr = {};
2675 	union nvmf_h2c_msg cmd = {};
2676 	union nvmf_c2h_msg rsp = {};
2677 
2678 	req.qpair = &qpair;
2679 	qpair.ctrlr = &ctrlr;
2680 	req.cmd = &cmd;
2681 	req.rsp = &rsp;
2682 
2683 	/* Invalid parameters */
2684 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2685 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
2686 
2687 	rc = nvmf_property_set(&req);
2688 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2689 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2690 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2691 
2692 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);
2693 
2694 	rc = nvmf_property_get(&req);
2695 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2696 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2697 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2698 
	/* Set cc with the matching 4-byte property size */
2700 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2701 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);
2702 
2703 	rc = nvmf_property_set(&req);
2704 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2705 
2706 	/* Emulate cc data */
2707 	ctrlr.vcprop.cc.raw = 0xDEADBEEF;
2708 
2709 	rc = nvmf_property_get(&req);
2710 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2711 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);
2712 
	/* Set asq (a 64-bit register) with a 4-byte property size */
2714 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2715 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2716 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);
2717 
2718 	rc = nvmf_property_set(&req);
2719 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2720 
2721 	/* Emulate asq data */
2722 	ctrlr.vcprop.asq = 0xAADDADBEEF;
2723 
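	/* A 4-byte get of the 64-bit asq register returns only its lower 32 bits, so
	 * 0xAADDADBEEF reads back as 0xDDADBEEF.
	 */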
2724 	rc = nvmf_property_get(&req);
2725 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2726 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
2727 }
2728 
2729 int main(int argc, char **argv)
2730 {
2731 	CU_pSuite	suite = NULL;
2732 	unsigned int	num_failures;
2733 
2734 	CU_set_error_action(CUEA_ABORT);
2735 	CU_initialize_registry();
2736 
2737 	suite = CU_add_suite("nvmf", NULL, NULL);
2738 	CU_ADD_TEST(suite, test_get_log_page);
2739 	CU_ADD_TEST(suite, test_process_fabrics_cmd);
2740 	CU_ADD_TEST(suite, test_connect);
2741 	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
2742 	CU_ADD_TEST(suite, test_identify_ns);
2743 	CU_ADD_TEST(suite, test_reservation_write_exclusive);
2744 	CU_ADD_TEST(suite, test_reservation_exclusive_access);
2745 	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
2746 	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
2747 	CU_ADD_TEST(suite, test_reservation_notification_log_page);
2748 	CU_ADD_TEST(suite, test_get_dif_ctx);
2749 	CU_ADD_TEST(suite, test_set_get_features);
2750 	CU_ADD_TEST(suite, test_identify_ctrlr);
2751 	CU_ADD_TEST(suite, test_custom_admin_cmd);
2752 	CU_ADD_TEST(suite, test_fused_compare_and_write);
2753 	CU_ADD_TEST(suite, test_multi_async_event_reqs);
2754 	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
2755 	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
2756 	CU_ADD_TEST(suite, test_multi_async_events);
2757 	CU_ADD_TEST(suite, test_rae);
2758 	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
2759 	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
2760 	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
2761 	CU_ADD_TEST(suite, test_zcopy_read);
2762 	CU_ADD_TEST(suite, test_zcopy_write);
2763 	CU_ADD_TEST(suite, test_nvmf_property_set);
2764 
2765 	allocate_threads(1);
2766 	set_thread(0);
2767 
2768 	CU_basic_set_mode(CU_BRM_VERBOSE);
2769 	CU_basic_run_tests();
2770 	num_failures = CU_get_number_of_failures();
2771 	CU_cleanup_registry();
2772 
2773 	free_threads();
2774 
2775 	return num_failures;
2776 }
2777