/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"
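
/* The code under test (nvmf/ctrlr.c) is #included directly into this unit
 * test, so its internal functions are reachable and the DEFINE_STUB()
 * definitions below stand in for its external dependencies instead of
 * linking the full nvmf library.
 */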

SPDK_LOG_REGISTER_COMPONENT(nvmf)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
};

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

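/* Sentinel bdev_io pointers: the nvmf_bdev_ctrlr_start_zcopy() stub below
 * stores one of these in req->zcopy_bdev_io so tests can tell which opcode
 * path was taken.
 */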
static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
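
/* Stubs whose return values a test needs to vary are overridden at runtime
 * with MOCK_SET()/MOCK_CLEAR() inside the test bodies (see test_connect()).
 */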

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

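/* Minimal identify implementation for the tests: report the bdev's block
 * count with a single 512-byte LBA format (LBADS = log2(512) = 9).
 */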
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

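/* Parses the 64-bit SLBA from CDW10/11 and the zero-based NLB field from
 * CDW12, range-checks against the bdev, then records the matching sentinel
 * and completes the request.
 */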
int
nvmf_bdev_ctrlr_start_zcopy(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		return -ENXIO;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	spdk_nvmf_request_complete(req);
	return 0;
}

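/* The end-zcopy stub simply drops the sentinel and completes the request;
 * the commit flag is ignored here.
 */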
int
nvmf_bdev_ctrlr_end_zcopy(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
	return 0;
}

static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);
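
	/* Note: Get Log Page encodes the transfer length as NUMD, a zero-based
	 * dword count split across the NUMDL/NUMDU fields;
	 * spdk_nvme_bytes_to_numd() converts a byte length accordingly.
	 */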

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct	spdk_nvmf_request req = {};
	int	ret;
	struct	spdk_nvmf_qpair req_qpair = {};
	union	nvmf_h2c_msg  req_cmd = {};
	union	nvmf_c2h_msg   req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd  = &req_cmd;
	req.rsp  = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);
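
	/* Most cases below queue the request, bump the subsystem poll group's
	 * mgmt_io_outstanding counter, and poll; the connect path is expected
	 * to drop the counter back to zero by the time poll_threads() returns,
	 * on both success and failure.
	 */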

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* I/O connect to discovery controller with keep-alive-timeout == 0.
	 *  Then, a fixed timeout value is set to keep-alive-timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
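
	/* Each returned descriptor is a 4-byte header (NIDT, NIDL, two reserved
	 * bytes) followed by NIDL bytes of identifier, so the assertions below
	 * index into the raw buffer at those offsets.
	 */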

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
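
	/* ns_arr[1] == NULL models an allocated but inactive NSID: Identify
	 * Namespace is expected to succeed and return all-zero data for it
	 * rather than fail.
	 */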

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	struct spdk_nvmf_subsystem_listener listener = {};
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

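/* ut_reservation_init() registers hosts A, B, and C against the shared
 * namespace; each test then picks a reservation type and designates the
 * holder via g_ns_info.holder_id before issuing commands.
 */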
static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has one controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has one controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

static void
init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	STAILQ_INIT(&ctrlr->async_events);
}

static void
cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_async_event_completion *event, *event_tmp;

	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
		free(event);
	}
}

static int
num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	int num = 0;
	struct spdk_nvmf_async_event_completion *event;

	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
		num++;
	}
	return num;
}

1415 static void
1416 test_reservation_notification_log_page(void)
1417 {
1418 	struct spdk_nvmf_ctrlr ctrlr;
1419 	struct spdk_nvmf_qpair qpair;
1420 	struct spdk_nvmf_ns ns;
1421 	struct spdk_nvmf_request req = {};
1422 	union nvmf_h2c_msg cmd = {};
1423 	union nvmf_c2h_msg rsp = {};
1424 	union spdk_nvme_async_event_completion event = {};
1425 	struct spdk_nvme_reservation_notification_log logs[3];
1426 	struct iovec iov;
1427 
1428 	memset(&ctrlr, 0, sizeof(ctrlr));
1429 	ctrlr.thread = spdk_get_thread();
1430 	TAILQ_INIT(&ctrlr.log_head);
1431 	init_pending_async_events(&ctrlr);
1432 	ns.nsid = 1;
1433 
1434 	/* Test Case: Mask all the reservation notifications */
1435 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1436 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1437 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1438 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1439 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1440 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1441 					  SPDK_NVME_RESERVATION_RELEASED);
1442 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1443 					  SPDK_NVME_RESERVATION_PREEMPTED);
1444 	poll_threads();
1445 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1446 
1447 	/* Test Case: Unmask all the reservation notifications,
1448 	 * 3 log pages are generated, and an AER is triggered.
1449 	 */
1450 	ns.mask = 0;
1451 	ctrlr.num_avail_log_pages = 0;
1452 	req.cmd = &cmd;
1453 	req.rsp = &rsp;
1454 	ctrlr.aer_req[0] = &req;
1455 	ctrlr.nr_aer_reqs = 1;
1456 	req.qpair = &qpair;
1457 	TAILQ_INIT(&qpair.outstanding);
1458 	qpair.ctrlr = NULL;
1459 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1460 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1461 
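	/* One AER request is outstanding, so once an unmasked notice below queues a
	 * log page the AER completes with RESERVATION_LOG_AVAIL; the completion in
	 * rsp is examined after poll_threads(). */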
1462 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1463 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1464 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1465 					  SPDK_NVME_RESERVATION_RELEASED);
1466 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1467 					  SPDK_NVME_RESERVATION_PREEMPTED);
1468 	poll_threads();
1469 	event.raw = rsp.nvme_cpl.cdw0;
1470 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1471 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1472 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1473 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1474 
1475 	/* Test Case: Get Log Page to clear the log pages */
1476 	iov.iov_base = &logs[0];
1477 	iov.iov_len = sizeof(logs);
1478 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1479 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1480 
1481 	cleanup_pending_async_events(&ctrlr);
1482 }
1483 
1484 static void
1485 test_get_dif_ctx(void)
1486 {
1487 	struct spdk_nvmf_subsystem subsystem = {};
1488 	struct spdk_nvmf_request req = {};
1489 	struct spdk_nvmf_qpair qpair = {};
1490 	struct spdk_nvmf_ctrlr ctrlr = {};
1491 	struct spdk_nvmf_ns ns = {};
1492 	struct spdk_nvmf_ns *_ns = NULL;
1493 	struct spdk_bdev bdev = {};
1494 	union nvmf_h2c_msg cmd = {};
1495 	struct spdk_dif_ctx dif_ctx = {};
1496 	bool ret;
1497 
1498 	ctrlr.subsys = &subsystem;
1499 
1500 	qpair.ctrlr = &ctrlr;
1501 
1502 	req.qpair = &qpair;
1503 	req.cmd = &cmd;
1504 
1505 	ns.bdev = &bdev;
1506 
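	/* Walk through each early-return condition of spdk_nvmf_request_get_dif_ctx(),
	 * removing one blocker at a time until the call finally succeeds. */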
1507 	ctrlr.dif_insert_or_strip = false;
1508 
1509 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1510 	CU_ASSERT(ret == false);
1511 
1512 	ctrlr.dif_insert_or_strip = true;
1513 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1514 
1515 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1516 	CU_ASSERT(ret == false);
1517 
1518 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1519 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1520 
1521 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1522 	CU_ASSERT(ret == false);
1523 
1524 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1525 
1526 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1527 	CU_ASSERT(ret == false);
1528 
1529 	qpair.qid = 1;
1530 
1531 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1532 	CU_ASSERT(ret == false);
1533 
1534 	cmd.nvme_cmd.nsid = 1;
1535 
1536 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1537 	CU_ASSERT(ret == false);
1538 
1539 	subsystem.max_nsid = 1;
1540 	subsystem.ns = &_ns;
1541 	subsystem.ns[0] = &ns;
1542 
1543 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1544 	CU_ASSERT(ret == false);
1545 
1546 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1547 
1548 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1549 	CU_ASSERT(ret == true);
1550 }
1551 
1552 static void
1553 test_identify_ctrlr(void)
1554 {
1555 	struct spdk_nvmf_tgt tgt = {};
1556 	struct spdk_nvmf_subsystem subsystem = {
1557 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1558 		.tgt = &tgt,
1559 	};
1560 	struct spdk_nvmf_transport_ops tops = {};
1561 	struct spdk_nvmf_transport transport = {
1562 		.ops = &tops,
1563 		.opts = {
1564 			.in_capsule_data_size = 4096,
1565 		},
1566 	};
1567 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1568 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1569 	struct spdk_nvme_ctrlr_data cdata = {};
1570 	uint32_t expected_ioccsz;
1571 
1572 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1573 
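	/* ioccsz is expressed in 16-byte units: the SQE itself plus the transport's
	 * in-capsule data size. */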
1574 	/* Check ioccsz, TCP transport */
1575 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1576 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1577 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1578 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1579 
1580 	/* Check ioccsz, RDMA transport */
1581 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1582 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1583 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1584 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1585 
1586 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1587 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1588 	ctrlr.dif_insert_or_strip = true;
1589 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1590 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1591 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1592 }
1593 
1594 static int
1595 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1596 {
1597 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1598 
1599 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1600 }
1601 
1602 static void
1603 test_custom_admin_cmd(void)
1604 {
1605 	struct spdk_nvmf_subsystem subsystem;
1606 	struct spdk_nvmf_qpair qpair;
1607 	struct spdk_nvmf_ctrlr ctrlr;
1608 	struct spdk_nvmf_request req;
1609 	struct spdk_nvmf_ns *ns_ptrs[1];
1610 	struct spdk_nvmf_ns ns;
1611 	union nvmf_h2c_msg cmd;
1612 	union nvmf_c2h_msg rsp;
1613 	struct spdk_bdev bdev;
1614 	uint8_t buf[4096];
1615 	int rc;
1616 
1617 	memset(&subsystem, 0, sizeof(subsystem));
1618 	ns_ptrs[0] = &ns;
1619 	subsystem.ns = ns_ptrs;
1620 	subsystem.max_nsid = 1;
1621 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1622 
1623 	memset(&ns, 0, sizeof(ns));
1624 	ns.opts.nsid = 1;
1625 	ns.bdev = &bdev;
1626 
1627 	memset(&qpair, 0, sizeof(qpair));
1628 	qpair.ctrlr = &ctrlr;
1629 
1630 	memset(&ctrlr, 0, sizeof(ctrlr));
1631 	ctrlr.subsys = &subsystem;
1632 	ctrlr.vcprop.cc.bits.en = 1;
1633 
1634 	memset(&req, 0, sizeof(req));
1635 	req.qpair = &qpair;
1636 	req.cmd = &cmd;
1637 	req.rsp = &rsp;
1638 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1639 	req.data = buf;
1640 	req.length = sizeof(buf);
1641 
1642 	memset(&cmd, 0, sizeof(cmd));
1643 	cmd.nvme_cmd.opc = 0xc1;
1644 	cmd.nvme_cmd.nsid = 0;
1645 	memset(&rsp, 0, sizeof(rsp));
1646 
1647 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1648 
1649 	/* Ensure that our custom handler is being called */
1650 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1651 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1652 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1653 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1654 }
1655 
1656 static void
1657 test_fused_compare_and_write(void)
1658 {
1659 	struct spdk_nvmf_request req = {};
1660 	struct spdk_nvmf_qpair qpair = {};
1661 	struct spdk_nvme_cmd cmd = {};
1662 	union nvmf_c2h_msg rsp = {};
1663 	struct spdk_nvmf_ctrlr ctrlr = {};
1664 	struct spdk_nvmf_subsystem subsystem = {};
1665 	struct spdk_nvmf_ns ns = {};
1666 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1667 	struct spdk_nvmf_subsystem_listener listener = {};
1668 	struct spdk_bdev bdev = {};
1669 
1670 	struct spdk_nvmf_poll_group group = {};
1671 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1672 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1673 	struct spdk_io_channel io_ch = {};
1674 
1675 	ns.bdev = &bdev;
1676 
1677 	subsystem.id = 0;
1678 	subsystem.max_nsid = 1;
1679 	subsys_ns[0] = &ns;
1680 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1681 
1682 	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1683 
1684 	/* Enable controller */
1685 	ctrlr.vcprop.cc.bits.en = 1;
1686 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1687 	ctrlr.listener = &listener;
1688 
1689 	group.num_sgroups = 1;
1690 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1691 	sgroups.num_ns = 1;
1692 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1693 	ns_info.channel = &io_ch;
1694 	sgroups.ns_info = &ns_info;
1695 	TAILQ_INIT(&sgroups.queued);
1696 	group.sgroups = &sgroups;
1697 	TAILQ_INIT(&qpair.outstanding);
1698 
1699 	qpair.ctrlr = &ctrlr;
1700 	qpair.group = &group;
1701 	qpair.qid = 1;
1702 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1703 
1704 	cmd.nsid = 1;
1705 
1706 	req.qpair = &qpair;
1707 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1708 	req.rsp = &rsp;
1709 
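	/* A valid fused pair is COMPARE (FUSE_FIRST) followed by WRITE (FUSE_SECOND);
	 * the first half is parked in qpair.first_fused_req until the second arrives. */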
1710 	/* SUCCESS/SUCCESS */
1711 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1712 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1713 
1714 	spdk_nvmf_request_exec(&req);
1715 	CU_ASSERT(qpair.first_fused_req != NULL);
1716 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1717 
1718 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1719 	cmd.opc = SPDK_NVME_OPC_WRITE;
1720 
1721 	spdk_nvmf_request_exec(&req);
1722 	CU_ASSERT(qpair.first_fused_req == NULL);
1723 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1724 
1725 	/* Wrong sequence */
1726 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1727 	cmd.opc = SPDK_NVME_OPC_WRITE;
1728 
1729 	spdk_nvmf_request_exec(&req);
1730 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1731 	CU_ASSERT(qpair.first_fused_req == NULL);
1732 
1733 	/* Write as FUSE_FIRST (wrong opcode) */
1734 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1735 	cmd.opc = SPDK_NVME_OPC_WRITE;
1736 
1737 	spdk_nvmf_request_exec(&req);
1738 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1739 	CU_ASSERT(qpair.first_fused_req == NULL);
1740 
1741 	/* Compare as FUSE_SECOND (wrong opcode): first set up a valid FUSE_FIRST compare */
1742 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1743 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1744 
1745 	spdk_nvmf_request_exec(&req);
1746 	CU_ASSERT(qpair.first_fused_req != NULL);
1747 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1748 
1749 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1750 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1751 
1752 	spdk_nvmf_request_exec(&req);
1753 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1754 	CU_ASSERT(qpair.first_fused_req == NULL);
1755 }
1756 
1757 static void
1758 test_multi_async_event_reqs(void)
1759 {
1760 	struct spdk_nvmf_subsystem subsystem = {};
1761 	struct spdk_nvmf_qpair qpair = {};
1762 	struct spdk_nvmf_ctrlr ctrlr = {};
1763 	struct spdk_nvmf_request req[5] = {};
1764 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1765 	struct spdk_nvmf_ns ns = {};
1766 	union nvmf_h2c_msg cmd[5] = {};
1767 	union nvmf_c2h_msg rsp[5] = {};
1768 
1769 	struct spdk_nvmf_poll_group group = {};
1770 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1771 
1772 	int i;
1773 
1774 	ns_ptrs[0] = &ns;
1775 	subsystem.ns = ns_ptrs;
1776 	subsystem.max_nsid = 1;
1777 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1778 
1779 	ns.opts.nsid = 1;
1780 	group.sgroups = &sgroups;
1781 
1782 	qpair.ctrlr = &ctrlr;
1783 	qpair.group = &group;
1784 	TAILQ_INIT(&qpair.outstanding);
1785 
1786 	ctrlr.subsys = &subsystem;
1787 	ctrlr.vcprop.cc.bits.en = 1;
1788 
1789 	for (i = 0; i < 5; i++) {
1790 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1791 		cmd[i].nvme_cmd.nsid = 1;
1792 		cmd[i].nvme_cmd.cid = i;
1793 
1794 		req[i].qpair = &qpair;
1795 		req[i].cmd = &cmd[i];
1796 		req[i].rsp = &rsp[i];
1797 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1798 	}
1799 
1800 	/* Target can store NVMF_MAX_ASYNC_EVENTS reqs */
1801 	sgroups.mgmt_io_outstanding = NVMF_MAX_ASYNC_EVENTS;
1802 	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
1803 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1804 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
1805 	}
1806 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1807 
1808 	/* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
1809 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1810 	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
1811 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1812 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
1813 
1814 	/* Test that the aer_reqs array stays contiguous when a request in the middle is aborted */
1815 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
1816 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1817 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1818 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
1819 
1820 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
1821 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1822 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1823 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
1824 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
1825 
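	/* The two remaining AERs are never completed here, so detach them from the
	 * outstanding list manually. */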
1826 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
1827 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
1828 }
1829 
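/* Each ANA group descriptor in this test carries exactly one NSID, hence the
 * extra sizeof(uint32_t); the log page is a header plus three descriptors. */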
1830 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
1831 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
1832 static void
1833 test_get_ana_log_page(void)
1834 {
1835 	struct spdk_nvmf_subsystem subsystem = {};
1836 	struct spdk_nvmf_ctrlr ctrlr = {};
1837 	struct spdk_nvmf_subsystem_listener listener = {};
1838 	struct spdk_nvmf_ns ns[3];
1839 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
1840 	uint64_t offset;
1841 	uint32_t length;
1842 	int i;
1843 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1844 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1845 	struct iovec iov, iovs[2];
1846 	struct spdk_nvme_ana_page *ana_hdr;
1847 	char _ana_desc[UT_ANA_DESC_SIZE];
1848 	struct spdk_nvme_ana_group_descriptor *ana_desc;
1849 
1850 	subsystem.ns = ns_arr;
1851 	subsystem.max_nsid = 3;
1852 	ctrlr.subsys = &subsystem;
1853 	ctrlr.listener = &listener;
1854 	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1855 
1856 	for (i = 0; i < 3; i++) {
1857 		ns_arr[i]->nsid = i + 1;
1858 	}
1859 
1860 	/* create expected page */
1861 	ana_hdr = (void *)&expected_page[0];
1862 	ana_hdr->num_ana_group_desc = 3;
1863 	ana_hdr->change_count = 0;
1864 
1865 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
1866 	ana_desc = (void *)_ana_desc;
1867 	offset = sizeof(struct spdk_nvme_ana_page);
1868 
1869 	for (i = 0; i < 3; i++) {
1870 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
1871 		ana_desc->ana_group_id = ns_arr[i]->nsid;
1872 		ana_desc->num_of_nsid = 1;
1873 		ana_desc->change_count = 0;
1874 		ana_desc->ana_state = ctrlr.listener->ana_state;
1875 		ana_desc->nsid[0] = ns_arr[i]->nsid;
1876 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
1877 		offset += UT_ANA_DESC_SIZE;
1878 	}
1879 
1880 	/* read entire actual log page */
1881 	offset = 0;
1882 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
1883 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
1884 		iov.iov_base = &actual_page[offset];
1885 		iov.iov_len = length;
1886 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
1887 		offset += length;
1888 	}
1889 
1890 	/* compare expected page and actual page */
1891 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1892 
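	/* Read the page again through two iovecs whose boundary falls 4 bytes into
	 * the last descriptor, exercising a copy that spans an iovec boundary. */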
1893 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
1894 	offset = 0;
1895 	iovs[0].iov_base = &actual_page[offset];
1896 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
1897 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
1898 	iovs[1].iov_base = &actual_page[offset];
1899 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
1900 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
1901 
1902 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1903 }
1904 
1905 static void
1906 test_multi_async_events(void)
1907 {
1908 	struct spdk_nvmf_subsystem subsystem = {};
1909 	struct spdk_nvmf_qpair qpair = {};
1910 	struct spdk_nvmf_ctrlr ctrlr = {};
1911 	struct spdk_nvmf_request req[4] = {};
1912 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1913 	struct spdk_nvmf_ns ns = {};
1914 	union nvmf_h2c_msg cmd[4] = {};
1915 	union nvmf_c2h_msg rsp[4] = {};
1916 	union spdk_nvme_async_event_completion event = {};
1917 	struct spdk_nvmf_poll_group group = {};
1918 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1919 	int i;
1920 
1921 	ns_ptrs[0] = &ns;
1922 	subsystem.ns = ns_ptrs;
1923 	subsystem.max_nsid = 1;
1924 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1925 
1926 	ns.opts.nsid = 1;
1927 	group.sgroups = &sgroups;
1928 
1929 	qpair.ctrlr = &ctrlr;
1930 	qpair.group = &group;
1931 	TAILQ_INIT(&qpair.outstanding);
1932 
1933 	ctrlr.subsys = &subsystem;
1934 	ctrlr.vcprop.cc.bits.en = 1;
1935 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
1936 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
1937 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
1938 	init_pending_async_events(&ctrlr);
1939 
1940 	/* The target queues pending events when there is no outstanding AER request */
1941 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
1942 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
1943 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
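	/* The three notices above are queued; the first three AER commands below
	 * therefore complete immediately, and only the fourth remains outstanding. */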
1944 
1945 	for (i = 0; i < 4; i++) {
1946 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1947 		cmd[i].nvme_cmd.nsid = 1;
1948 		cmd[i].nvme_cmd.cid = i;
1949 
1950 		req[i].qpair = &qpair;
1951 		req[i].cmd = &cmd[i];
1952 		req[i].rsp = &rsp[i];
1953 
1954 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1955 
1956 		sgroups.mgmt_io_outstanding = 1;
1957 		if (i < 3) {
1958 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1959 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1960 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
1961 		} else {
1962 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1963 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1964 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
1965 		}
1966 	}
1967 
1968 	event.raw = rsp[0].nvme_cpl.cdw0;
1969 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
1970 	event.raw = rsp[1].nvme_cpl.cdw0;
1971 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
1972 	event.raw = rsp[2].nvme_cpl.cdw0;
1973 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
1974 
1975 	cleanup_pending_async_events(&ctrlr);
1976 }
1977 
1978 static void
1979 test_rae(void)
1980 {
1981 	struct spdk_nvmf_subsystem subsystem = {};
1982 	struct spdk_nvmf_qpair qpair = {};
1983 	struct spdk_nvmf_ctrlr ctrlr = {};
1984 	struct spdk_nvmf_request req[3] = {};
1985 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1986 	struct spdk_nvmf_ns ns = {};
1987 	union nvmf_h2c_msg cmd[3] = {};
1988 	union nvmf_c2h_msg rsp[3] = {};
1989 	union spdk_nvme_async_event_completion event = {};
1990 	struct spdk_nvmf_poll_group group = {};
1991 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1992 	int i;
1993 	char data[4096];
1994 
1995 	ns_ptrs[0] = &ns;
1996 	subsystem.ns = ns_ptrs;
1997 	subsystem.max_nsid = 1;
1998 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1999 
2000 	ns.opts.nsid = 1;
2001 	group.sgroups = &sgroups;
2002 
2003 	qpair.ctrlr = &ctrlr;
2004 	qpair.group = &group;
2005 	TAILQ_INIT(&qpair.outstanding);
2006 
2007 	ctrlr.subsys = &subsystem;
2008 	ctrlr.vcprop.cc.bits.en = 1;
2009 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2010 	init_pending_async_events(&ctrlr);
2011 
2012 	/* The target queues pending events when there is no outstanding AER request */
2013 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2014 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2015 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2016 	/* Only one event is queued until RAE is cleared */
2017 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2018 
2019 	req[0].qpair = &qpair;
2020 	req[0].cmd = &cmd[0];
2021 	req[0].rsp = &rsp[0];
2022 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2023 	cmd[0].nvme_cmd.nsid = 1;
2024 	cmd[0].nvme_cmd.cid = 0;
2025 
2026 	for (i = 1; i < 3; i++) {
2027 		req[i].qpair = &qpair;
2028 		req[i].cmd = &cmd[i];
2029 		req[i].rsp = &rsp[i];
2030 		req[i].data = &data;
2031 		req[i].length = sizeof(data);
2032 
2033 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2034 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2035 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2036 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2037 			spdk_nvme_bytes_to_numd(req[i].length);
2038 		cmd[i].nvme_cmd.cid = i;
2039 	}
2040 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2041 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
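	/* RAE (Retain Asynchronous Event): when set, Get Log Page leaves the event
	 * masked so no new notice can queue; clearing it re-arms the event. */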
2042 
2043 	/* consume the pending event */
2044 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2045 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2046 	event.raw = rsp[0].nvme_cpl.cdw0;
2047 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2048 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2049 
2050 	/* get log with RAE set */
2051 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2052 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2053 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2054 
2055 	/* No new event is generated until RAE is cleared */
2056 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2057 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2058 
2059 	/* get log with RAE clear */
2060 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2061 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2062 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2063 
2064 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2065 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2066 
2067 	cleanup_pending_async_events(&ctrlr);
2068 }
2069 
2070 static void
2071 test_nvmf_ctrlr_create_destruct(void)
2072 {
2073 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2074 	struct spdk_nvmf_poll_group group = {};
2075 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2076 	struct spdk_nvmf_transport transport = {};
2077 	struct spdk_nvmf_transport_ops tops = {};
2078 	struct spdk_nvmf_subsystem subsystem = {};
2079 	struct spdk_nvmf_request req = {};
2080 	struct spdk_nvmf_qpair qpair = {};
2081 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2082 	struct spdk_nvmf_tgt tgt = {};
2083 	union nvmf_h2c_msg cmd = {};
2084 	union nvmf_c2h_msg rsp = {};
2085 	const uint8_t hostid[16] = {
2086 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2087 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2088 	};
2089 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2090 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2091 
2092 	group.thread = spdk_get_thread();
2093 	transport.ops = &tops;
2094 	transport.opts.max_aq_depth = 32;
2095 	transport.opts.max_queue_depth = 64;
2096 	transport.opts.max_qpairs_per_ctrlr = 3;
2097 	transport.opts.dif_insert_or_strip = true;
2098 	transport.tgt = &tgt;
2099 	qpair.transport = &transport;
2100 	qpair.group = &group;
2101 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2102 	TAILQ_INIT(&qpair.outstanding);
2103 
2104 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2105 	connect_data.cntlid = 0xFFFF;
2106 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2107 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2108 
2109 	subsystem.thread = spdk_get_thread();
2110 	subsystem.id = 1;
2111 	TAILQ_INIT(&subsystem.ctrlrs);
2112 	subsystem.tgt = &tgt;
2113 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2114 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2115 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2116 
2117 	group.sgroups = sgroups;
2118 
2119 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2120 	cmd.connect_cmd.cid = 1;
2121 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2122 	cmd.connect_cmd.recfmt = 0;
2123 	cmd.connect_cmd.qid = 0;
2124 	cmd.connect_cmd.sqsize = 31;
2125 	cmd.connect_cmd.cattr = 0;
2126 	cmd.connect_cmd.kato = 120000;
2127 
2128 	req.qpair = &qpair;
2129 	req.length = sizeof(connect_data);
2130 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2131 	req.data = &connect_data;
2132 	req.cmd = &cmd;
2133 	req.rsp = &rsp;
2134 
2135 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2136 	sgroups[subsystem.id].mgmt_io_outstanding++;
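	/* The connect is accounted as an outstanding mgmt I/O; controller creation
	 * completes asynchronously, hence the poll_threads() below. */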
2137 
2138 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.data);
2139 	poll_threads();
2140 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2141 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2142 	CU_ASSERT(ctrlr->subsys == &subsystem);
2143 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2144 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2145 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2146 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2147 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2148 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2149 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2150 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
2151 	CU_ASSERT(!memcmp((void *)&ctrlr->hostid, hostid, 16));	/* memcmp: hostid starts with 0x00, so strncmp would stop at byte 0 */
2152 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2153 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2154 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2155 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == 1);
2156 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2157 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2158 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2159 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2160 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2161 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2162 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2163 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2164 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2165 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2166 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2167 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2168 
2169 	nvmf_ctrlr_destruct(ctrlr);
2170 	poll_threads();
2171 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2172 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2173 }
2174 
2175 static void
2176 test_nvmf_ctrlr_use_zcopy(void)
2177 {
2178 	struct spdk_nvmf_subsystem subsystem = {};
2179 	struct spdk_nvmf_request req = {};
2180 	struct spdk_nvmf_qpair qpair = {};
2181 	struct spdk_nvmf_ctrlr ctrlr = {};
2182 	union nvmf_h2c_msg cmd = {};
2183 	struct spdk_nvmf_ns ns = {};
2184 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2185 	struct spdk_bdev bdev = {};
2186 	struct spdk_nvmf_poll_group group = {};
2187 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2188 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2189 	struct spdk_io_channel io_ch = {};
2190 	int opc;
2191 
2192 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2193 	ns.bdev = &bdev;
2194 
2195 	subsystem.id = 0;
2196 	subsystem.max_nsid = 1;
2197 	subsys_ns[0] = &ns;
2198 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2199 
2200 	ctrlr.subsys = &subsystem;
2201 
2202 	qpair.ctrlr = &ctrlr;
2203 	qpair.group = &group;
2204 	qpair.qid = 1;
2205 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2206 
2207 	group.thread = spdk_get_thread();
2208 	group.num_sgroups = 1;
2209 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2210 	sgroups.num_ns = 1;
2211 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2212 	ns_info.channel = &io_ch;
2213 	sgroups.ns_info = &ns_info;
2214 	TAILQ_INIT(&sgroups.queued);
2215 	group.sgroups = &sgroups;
2216 	TAILQ_INIT(&qpair.outstanding);
2217 
2218 	req.qpair = &qpair;
2219 	req.cmd = &cmd;
2220 
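	/* Zero-copy applies only to non-fused READ/WRITE on an I/O queue against a
	 * namespace whose bdev supports it; each disqualifier is checked in turn. */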
2221 	/* Admin queue */
2222 	qpair.qid = 0;
2223 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2224 	qpair.qid = 1;
2225 
2226 	/* Invalid Opcodes */
2227 	for (opc = 0; opc <= 255; opc++) {
2228 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2229 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2230 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2231 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2232 		}
2233 	}
2234 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2235 
2236 	/* Fused WRITE */
2237 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2238 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2239 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2240 
2241 	/* Invalid NSID: no such namespace, hence no bdev */
2242 	cmd.nvme_cmd.nsid = 4;
2243 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2244 	cmd.nvme_cmd.nsid = 1;
2245 
2246 	/* ZCOPY Not supported */
2247 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2248 
2249 	/* Success */
2250 	ns.zcopy = true;
2251 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2252 }
2253 
2254 static void
2255 test_spdk_nvmf_request_zcopy_start(void)
2256 {
2257 	struct spdk_nvmf_request req = {};
2258 	struct spdk_nvmf_qpair qpair = {};
2259 	struct spdk_nvme_cmd cmd = {};
2260 	union nvmf_c2h_msg rsp = {};
2261 	struct spdk_nvmf_ctrlr ctrlr = {};
2262 	struct spdk_nvmf_subsystem subsystem = {};
2263 	struct spdk_nvmf_ns ns = {};
2264 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2265 	struct spdk_nvmf_subsystem_listener listener = {};
2266 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2267 
2268 	struct spdk_nvmf_poll_group group = {};
2269 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2270 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2271 	struct spdk_io_channel io_ch = {};
2272 
2273 	ns.bdev = &bdev;
2274 	ns.zcopy = true;
2275 
2276 	subsystem.id = 0;
2277 	subsystem.max_nsid = 1;
2278 	subsys_ns[0] = &ns;
2279 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2280 
2281 	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2282 
2283 	/* Enable controller */
2284 	ctrlr.vcprop.cc.bits.en = 1;
2285 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2286 	ctrlr.listener = &listener;
2287 
2288 	group.thread = spdk_get_thread();
2289 	group.num_sgroups = 1;
2290 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2291 	sgroups.num_ns = 1;
2292 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2293 	ns_info.channel = &io_ch;
2294 	sgroups.ns_info = &ns_info;
2295 	TAILQ_INIT(&sgroups.queued);
2296 	group.sgroups = &sgroups;
2297 	TAILQ_INIT(&qpair.outstanding);
2298 
2299 	qpair.ctrlr = &ctrlr;
2300 	qpair.group = &group;
2301 	qpair.qid = 1;
2302 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2303 
2304 	cmd.nsid = 1;
2305 
2306 	req.qpair = &qpair;
2307 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2308 	req.rsp = &rsp;
2309 	cmd.opc = SPDK_NVME_OPC_READ;
2310 
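	/* Each failure case below breaks one precondition, checks that the request
	 * is backed out to NVMF_ZCOPY_PHASE_NONE, and then restores the field. */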
2311 	/* Fail because no controller */
2312 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2313 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2314 	qpair.ctrlr = NULL;
2315 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2316 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2317 	qpair.ctrlr = &ctrlr;
2318 
2319 	/* Fail because no sgroup */
2320 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2321 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2322 	group.sgroups = NULL;
2323 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2324 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2325 	group.sgroups = &sgroups;
2326 
2327 	/* Fail because bad NSID */
2328 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2329 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2330 	cmd.nsid = 0;
2331 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2332 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2333 	cmd.nsid = 1;
2334 
2335 	/* Fail because bad Channel */
2336 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2337 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2338 	ns_info.channel = NULL;
2339 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2340 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2341 	ns_info.channel = &io_ch;
2342 
2343 	/* Fail because NSID is not active */
2344 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2345 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2346 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2347 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2348 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2349 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2350 
2351 	/* Fail because QPair is not active */
2352 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2353 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2354 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2355 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2356 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2357 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2358 
2359 	/* Fail because nvmf_bdev_ctrlr_start_zcopy fails */
2360 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2361 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2362 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2363 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2364 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
2365 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
2366 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
2367 	cmd.cdw10 = 0;
2368 	cmd.cdw12 = 0;
2369 
2370 	/* Success */
2371 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2372 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2373 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
2374 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2375 }
2376 
2377 static void
2378 test_zcopy_read(void)
2379 {
2380 	struct spdk_nvmf_request req = {};
2381 	struct spdk_nvmf_qpair qpair = {};
2382 	struct spdk_nvme_cmd cmd = {};
2383 	union nvmf_c2h_msg rsp = {};
2384 	struct spdk_nvmf_ctrlr ctrlr = {};
2385 	struct spdk_nvmf_subsystem subsystem = {};
2386 	struct spdk_nvmf_ns ns = {};
2387 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2388 	struct spdk_nvmf_subsystem_listener listener = {};
2389 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2390 
2391 	struct spdk_nvmf_poll_group group = {};
2392 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2393 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2394 	struct spdk_io_channel io_ch = {};
2395 
2396 	ns.bdev = &bdev;
2397 	ns.zcopy = true;
2398 
2399 	subsystem.id = 0;
2400 	subsystem.max_nsid = 1;
2401 	subsys_ns[0] = &ns;
2402 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2403 
2404 	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2405 
2406 	/* Enable controller */
2407 	ctrlr.vcprop.cc.bits.en = 1;
2408 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2409 	ctrlr.listener = &listener;
2410 
2411 	group.thread = spdk_get_thread();
2412 	group.num_sgroups = 1;
2413 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2414 	sgroups.num_ns = 1;
2415 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2416 	ns_info.channel = &io_ch;
2417 	sgroups.ns_info = &ns_info;
2418 	TAILQ_INIT(&sgroups.queued);
2419 	group.sgroups = &sgroups;
2420 	TAILQ_INIT(&qpair.outstanding);
2421 
2422 	qpair.ctrlr = &ctrlr;
2423 	qpair.group = &group;
2424 	qpair.qid = 1;
2425 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2426 
2427 	cmd.nsid = 1;
2428 
2429 	req.qpair = &qpair;
2430 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2431 	req.rsp = &rsp;
2432 	cmd.opc = SPDK_NVME_OPC_READ;
2433 
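	/* Walk the full zero-copy read lifecycle: start, execute, then end with
	 * commit=false; the stubbed bdev layer hands back a marker zcopy_bdev_io. */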
2434 	/* Prepare for zcopy */
2435 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2436 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2437 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2438 	CU_ASSERT(ns_info.io_outstanding == 0);
2439 
2440 	/* Perform the zcopy start */
2441 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
2442 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2443 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2444 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2445 	CU_ASSERT(ns_info.io_outstanding == 1);
2446 
2447 	/* Execute the request */
2448 	spdk_nvmf_request_exec(&req);
2449 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2450 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2451 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2452 	CU_ASSERT(ns_info.io_outstanding == 1);
2453 
2454 	/* Perform the zcopy end */
2455 	spdk_nvmf_request_zcopy_end(&req, false);
2456 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2457 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2458 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2459 	CU_ASSERT(ns_info.io_outstanding == 0);
2460 }
2461 
2462 static void
2463 test_zcopy_write(void)
2464 {
2465 	struct spdk_nvmf_request req = {};
2466 	struct spdk_nvmf_qpair qpair = {};
2467 	struct spdk_nvme_cmd cmd = {};
2468 	union nvmf_c2h_msg rsp = {};
2469 	struct spdk_nvmf_ctrlr ctrlr = {};
2470 	struct spdk_nvmf_subsystem subsystem = {};
2471 	struct spdk_nvmf_ns ns = {};
2472 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2473 	struct spdk_nvmf_subsystem_listener listener = {};
2474 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2475 
2476 	struct spdk_nvmf_poll_group group = {};
2477 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2478 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2479 	struct spdk_io_channel io_ch = {};
2480 
2481 	ns.bdev = &bdev;
2482 	ns.zcopy = true;
2483 
2484 	subsystem.id = 0;
2485 	subsystem.max_nsid = 1;
2486 	subsys_ns[0] = &ns;
2487 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2488 
2489 	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2490 
2491 	/* Enable controller */
2492 	ctrlr.vcprop.cc.bits.en = 1;
2493 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2494 	ctrlr.listener = &listener;
2495 
2496 	group.thread = spdk_get_thread();
2497 	group.num_sgroups = 1;
2498 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2499 	sgroups.num_ns = 1;
2500 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2501 	ns_info.channel = &io_ch;
2502 	sgroups.ns_info = &ns_info;
2503 	TAILQ_INIT(&sgroups.queued);
2504 	group.sgroups = &sgroups;
2505 	TAILQ_INIT(&qpair.outstanding);
2506 
2507 	qpair.ctrlr = &ctrlr;
2508 	qpair.group = &group;
2509 	qpair.qid = 1;
2510 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2511 
2512 	cmd.nsid = 1;
2513 
2514 	req.qpair = &qpair;
2515 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2516 	req.rsp = &rsp;
2517 	cmd.opc = SPDK_NVME_OPC_WRITE;
2518 
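	/* Same lifecycle as the read case, except the zcopy end commits the data
	 * (commit=true) for the write. */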
2519 	/* Prepare for zcopy */
2520 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2521 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2522 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2523 	CU_ASSERT(ns_info.io_outstanding == 0);
2524 
2525 	/* Perform the zcopy start */
2526 	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
2527 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2528 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2529 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2530 	CU_ASSERT(ns_info.io_outstanding == 1);
2531 
2532 	/* Execute the request */
2533 	spdk_nvmf_request_exec(&req);
2534 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2535 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2536 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2537 	CU_ASSERT(ns_info.io_outstanding == 1);
2538 
2539 	/* Perform the zcopy end */
2540 	spdk_nvmf_request_zcopy_end(&req, true);
2541 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2542 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2543 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2544 	CU_ASSERT(ns_info.io_outstanding == 0);
2545 }
2546 
2547 static void
2548 test_nvmf_property_set(void)
2549 {
2550 	int rc;
2551 	struct spdk_nvmf_request req = {};
2552 	struct spdk_nvmf_qpair qpair = {};
2553 	struct spdk_nvmf_ctrlr ctrlr = {};
2554 	union nvmf_h2c_msg cmd = {};
2555 	union nvmf_c2h_msg rsp = {};
2556 
2557 	req.qpair = &qpair;
2558 	qpair.ctrlr = &ctrlr;
2559 	req.cmd = &cmd;
2560 	req.rsp = &rsp;
2561 
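	/* Setting vs (read-only) and getting intms (not exposed over fabrics) must
	 * both fail with INVALID_PARAM. */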
2562 	/* Invalid parameters */
2563 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2564 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
2565 
2566 	rc = nvmf_property_set(&req);
2567 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2568 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2569 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2570 
2571 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);
2572 
2573 	rc = nvmf_property_get(&req);
2574 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2575 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2576 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2577 
2578 	/* Set cc with same property size */
2579 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2580 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);
2581 
2582 	rc = nvmf_property_set(&req);
2583 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2584 
2585 	/* Emulate cc data */
2586 	ctrlr.vcprop.cc.raw = 0xDEADBEEF;
2587 
2588 	rc = nvmf_property_get(&req);
2589 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2590 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);
2591 
2592 	/* Set asq with different property size */
2593 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2594 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2595 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);
2596 
2597 	rc = nvmf_property_set(&req);
2598 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2599 
2600 	/* Emulate asq data */
2601 	ctrlr.vcprop.asq = 0xAADDADBEEF;
2602 
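	/* A 4-byte get of the 8-byte asq property returns only the low 32 bits,
	 * hence 0xDDADBEEF rather than 0xAADDADBEEF. */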
2603 	rc = nvmf_property_get(&req);
2604 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2605 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
2606 }
2607 
2608 int main(int argc, char **argv)
2609 {
2610 	CU_pSuite	suite = NULL;
2611 	unsigned int	num_failures;
2612 
2613 	CU_set_error_action(CUEA_ABORT);
2614 	CU_initialize_registry();
2615 
2616 	suite = CU_add_suite("nvmf", NULL, NULL);
2617 	CU_ADD_TEST(suite, test_get_log_page);
2618 	CU_ADD_TEST(suite, test_process_fabrics_cmd);
2619 	CU_ADD_TEST(suite, test_connect);
2620 	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
2621 	CU_ADD_TEST(suite, test_identify_ns);
2622 	CU_ADD_TEST(suite, test_reservation_write_exclusive);
2623 	CU_ADD_TEST(suite, test_reservation_exclusive_access);
2624 	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
2625 	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
2626 	CU_ADD_TEST(suite, test_reservation_notification_log_page);
2627 	CU_ADD_TEST(suite, test_get_dif_ctx);
2628 	CU_ADD_TEST(suite, test_set_get_features);
2629 	CU_ADD_TEST(suite, test_identify_ctrlr);
2630 	CU_ADD_TEST(suite, test_custom_admin_cmd);
2631 	CU_ADD_TEST(suite, test_fused_compare_and_write);
2632 	CU_ADD_TEST(suite, test_multi_async_event_reqs);
2633 	CU_ADD_TEST(suite, test_get_ana_log_page);
2634 	CU_ADD_TEST(suite, test_multi_async_events);
2635 	CU_ADD_TEST(suite, test_rae);
2636 	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
2637 	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
2638 	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
2639 	CU_ADD_TEST(suite, test_zcopy_read);
2640 	CU_ADD_TEST(suite, test_zcopy_write);
2641 	CU_ADD_TEST(suite, test_nvmf_property_set);
2642 
2643 	allocate_threads(1);
2644 	set_thread(0);
2645 
2646 	CU_basic_set_mode(CU_BRM_VERBOSE);
2647 	CU_basic_run_tests();
2648 	num_failures = CU_get_number_of_failures();
2649 	CU_cleanup_registry();
2650 
2651 	free_threads();
2652 
2653 	return num_failures;
2654 }
2655