/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

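/*
 * The source under test is #include'd directly (along with the shared
 * multithread test harness) rather than linked, so this file can exercise
 * ctrlr.c's static functions and override selected dependencies with the
 * stubs defined below.
 */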
#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

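/*
 * Minimal stand-in for the real struct spdk_bdev: only the fields these
 * tests dereference (block count and block size) are provided, plus a
 * placeholder member for the mock framework.
 */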
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
};

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

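/*
 * Sentinel bdev_io pointers: nvmf_bdev_ctrlr_zcopy_start() below stores one
 * of these in req->zcopy_bdev_io, so a test can tell from the pointer value
 * alone which zcopy path (read, write, or failure) was taken.
 */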
static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

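/*
 * DEFINE_STUB(name, return_type, args, default_value) and DEFINE_STUB_V
 * (for void functions) generate mock implementations of ctrlr.c's external
 * dependencies. A test can override a stub's return value at runtime and
 * restore the default afterwards, as test_connect() does below:
 *
 *	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
 *	...
 *	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
 */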
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

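/*
 * Hand-rolled fake (rather than a DEFINE_STUB) so that the identify data
 * can be derived from the test's fake bdev: it reports the bdev's block
 * count and a single 512-byte LBA format (lbads is log2 of the LBA size).
 */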
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

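/*
 * Fake namespace iterator: subsystem->ns[] is indexed by NSID - 1 and may
 * contain NULL holes for inactive namespaces, so the walk skips empty slots
 * until it finds the next allocated entry or passes max_nsid.
 */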
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

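/*
 * Fake zcopy start: validates the LBA range against the fake bdev, then
 * records which opcode was seen by storing the matching sentinel pointer in
 * req->zcopy_bdev_io instead of starting real zero-copy I/O. As in the NVMe
 * read/write command layout, SLBA is the 64-bit value at CDW10/11 and NLB
 * (0's based) is the low 16 bits of CDW12.
 */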
int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

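	/*
	 * SQSIZE in the Connect command is zero-based, so 31 means a queue of
	 * 32 entries - exactly max_aq_depth for the admin queue. The
	 * out-of-range cases below (32 for the admin queue, 64 for an I/O
	 * queue) therefore exceed the transport limits by one.
	 */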
	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* Admin connect (qid == 0) to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Admin connect (qid == 0) to discovery controller with keep-alive-timeout == 0.
	 * In this case the target applies a default, fixed keep-alive timeout,
	 * so the keep-alive poller must still be running.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

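/*
 * Each entry in the Namespace Identification Descriptor list returned by
 * Identify (CNS 03h) starts with a 4-byte header - NIDT, NIDL, two reserved
 * bytes - followed by NIDL bytes of identifier. That layout is why the
 * asserts below find the first EUI64 byte at buf[4] and the next descriptor
 * starting at buf[12].
 */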
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);
	req.iovcnt = 1;
	req.iov[0].iov_base = req.data;
	req.iov[0].iov_len = req.length;

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
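	/* Slot 2 (ns_arr[1] == NULL) models an inactive namespace: Identify
	 * against it must succeed but return all-zero namespace data, which
	 * the "Valid but inactive NSID 2" case below verifies.
	 */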
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set TMPSEL (cdw11[19:16]) to 0x9, a reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set TMPSEL (cdw11[19:16]) to 0x9, a reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

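/*
 * Builds the topology above: hosts A, B and C are registered with the
 * namespace (reg_hostid[0..2]), controllers 1_A and 2_A share host A's ID,
 * and the caller-supplied reservation type is installed on the namespace.
 */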
1204 static void
1205 ut_reservation_init(enum spdk_nvme_reservation_type rtype)
1206 {
1207 	/* Host A has two controllers */
1208 	spdk_uuid_generate(&g_ctrlr1_A.hostid);
1209 	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
1210 
1211 	/* Host B has 1 controller */
1212 	spdk_uuid_generate(&g_ctrlr_B.hostid);
1213 
1214 	/* Host C has 1 controller */
1215 	spdk_uuid_generate(&g_ctrlr_C.hostid);
1216 
1217 	memset(&g_ns_info, 0, sizeof(g_ns_info));
1218 	g_ns_info.rtype = rtype;
1219 	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
1220 	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
1221 	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
1222 }
1223 
1224 static void
1225 test_reservation_write_exclusive(void)
1226 {
1227 	struct spdk_nvmf_request req = {};
1228 	union nvmf_h2c_msg cmd = {};
1229 	union nvmf_c2h_msg rsp = {};
1230 	int rc;
1231 
1232 	req.cmd = &cmd;
1233 	req.rsp = &rsp;
1234 
1235 	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
1236 	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1237 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1238 
1239 	/* Test Case: Issue a Read command from Host A and Host B */
1240 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1241 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1242 	SPDK_CU_ASSERT_FATAL(rc == 0);
1243 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1244 	SPDK_CU_ASSERT_FATAL(rc == 0);
1245 
1246 	/* Test Case: Issue a DSM Write command from Host A and Host B */
1247 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1248 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1249 	SPDK_CU_ASSERT_FATAL(rc == 0);
1250 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1251 	SPDK_CU_ASSERT_FATAL(rc < 0);
1252 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1253 
1254 	/* Test Case: Issue a Write command from Host C */
1255 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1256 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1257 	SPDK_CU_ASSERT_FATAL(rc < 0);
1258 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1259 
1260 	/* Test Case: Issue a Read command from Host B */
1261 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1262 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1263 	SPDK_CU_ASSERT_FATAL(rc == 0);
1264 
1265 	/* Unregister Host C */
1266 	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
1267 
1268 	/* Test Case: Read and Write commands from non-registrant Host C */
1269 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1270 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1271 	SPDK_CU_ASSERT_FATAL(rc < 0);
1272 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1273 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1274 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1275 	SPDK_CU_ASSERT_FATAL(rc == 0);
1276 }
1277 
1278 static void
1279 test_reservation_exclusive_access(void)
1280 {
1281 	struct spdk_nvmf_request req = {};
1282 	union nvmf_h2c_msg cmd = {};
1283 	union nvmf_c2h_msg rsp = {};
1284 	int rc;
1285 
1286 	req.cmd = &cmd;
1287 	req.rsp = &rsp;
1288 
1289 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1290 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1291 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1292 
1293 	/* Test Case: Issue a Read command from Host B */
1294 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1295 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1296 	SPDK_CU_ASSERT_FATAL(rc < 0);
1297 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1298 
1299 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1300 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1301 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1302 	SPDK_CU_ASSERT_FATAL(rc == 0);
1303 }
1304 
1305 static void
1306 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1307 {
1308 	struct spdk_nvmf_request req = {};
1309 	union nvmf_h2c_msg cmd = {};
1310 	union nvmf_c2h_msg rsp = {};
1311 	int rc;
1312 
1313 	req.cmd = &cmd;
1314 	req.rsp = &rsp;
1315 
1316 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1317 	ut_reservation_init(rtype);
1318 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1319 
1320 	/* Test Case: Issue a Read command from Host A and Host C */
1321 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1322 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1323 	SPDK_CU_ASSERT_FATAL(rc == 0);
1324 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1325 	SPDK_CU_ASSERT_FATAL(rc == 0);
1326 
1327 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1328 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1329 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1330 	SPDK_CU_ASSERT_FATAL(rc == 0);
1331 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1332 	SPDK_CU_ASSERT_FATAL(rc == 0);
1333 
1334 	/* Unregister Host C */
1335 	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
1336 
1337 	/* Test Case: Read and Write commands from non-registrant Host C */
1338 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1339 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1340 	SPDK_CU_ASSERT_FATAL(rc == 0);
1341 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1342 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1343 	SPDK_CU_ASSERT_FATAL(rc < 0);
1344 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1345 }
1346 
1347 static void
1348 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1349 {
1350 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1351 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1352 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1353 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1354 }
1355 
1356 static void
1357 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1358 {
1359 	struct spdk_nvmf_request req = {};
1360 	union nvmf_h2c_msg cmd = {};
1361 	union nvmf_c2h_msg rsp = {};
1362 	int rc;
1363 
1364 	req.cmd = &cmd;
1365 	req.rsp = &rsp;
1366 
1367 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1368 	ut_reservation_init(rtype);
1369 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1370 
1371 	/* Test Case: Issue a Write command from Host B */
1372 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1373 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1374 	SPDK_CU_ASSERT_FATAL(rc == 0);
1375 
1376 	/* Unregister Host B */
1377 	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));
1378 
1379 	/* Test Case: Issue a Read command from Host B */
1380 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1381 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1382 	SPDK_CU_ASSERT_FATAL(rc < 0);
1383 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1384 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1385 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1386 	SPDK_CU_ASSERT_FATAL(rc < 0);
1387 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1388 }
1389 
1390 static void
1391 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1392 {
1393 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1394 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1395 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1396 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1397 }
1398 
1399 static void
1400 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1401 {
1402 	STAILQ_INIT(&ctrlr->async_events);
1403 }
1404 
1405 static void
1406 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1407 {
1408 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1409 
1410 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1411 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1412 		free(event);
1413 	}
1414 }
1415 
1416 static int
1417 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1418 {
1419 	int num = 0;
1420 	struct spdk_nvmf_async_event_completion *event;
1421 
1422 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1423 		num++;
1424 	}
1425 	return num;
1426 }
1427 
1428 static void
1429 test_reservation_notification_log_page(void)
1430 {
1431 	struct spdk_nvmf_ctrlr ctrlr;
1432 	struct spdk_nvmf_qpair qpair;
1433 	struct spdk_nvmf_ns ns;
1434 	struct spdk_nvmf_request req = {};
1435 	union nvmf_h2c_msg cmd = {};
1436 	union nvmf_c2h_msg rsp = {};
1437 	union spdk_nvme_async_event_completion event = {};
1438 	struct spdk_nvme_reservation_notification_log logs[3];
1439 	struct iovec iov;
1440 
1441 	memset(&ctrlr, 0, sizeof(ctrlr));
1442 	ctrlr.thread = spdk_get_thread();
1443 	TAILQ_INIT(&ctrlr.log_head);
1444 	init_pending_async_events(&ctrlr);
1445 	ns.nsid = 1;
1446 
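	/*
	 * ns.mask selects which reservation notification types are
	 * suppressed; a masked event generates neither a log page entry nor
	 * an AER completion.
	 */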
1447 	/* Test Case: Mask all the reservation notifications */
1448 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1449 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1450 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1451 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1452 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1453 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1454 					  SPDK_NVME_RESERVATION_RELEASED);
1455 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1456 					  SPDK_NVME_RESERVATION_PREEMPTED);
1457 	poll_threads();
1458 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1459 
1460 	/* Test Case: Unmask all the reservation notifications,
1461 	 * 3 log pages are generated, and an AER is triggered.
1462 	 */
1463 	ns.mask = 0;
1464 	ctrlr.num_avail_log_pages = 0;
1465 	req.cmd = &cmd;
1466 	req.rsp = &rsp;
1467 	ctrlr.aer_req[0] = &req;
1468 	ctrlr.nr_aer_reqs = 1;
1469 	req.qpair = &qpair;
1470 	TAILQ_INIT(&qpair.outstanding);
1471 	qpair.ctrlr = NULL;
1472 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1473 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1474 
1475 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1476 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1477 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1478 					  SPDK_NVME_RESERVATION_RELEASED);
1479 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1480 					  SPDK_NVME_RESERVATION_PREEMPTED);
1481 	poll_threads();
1482 	event.raw = rsp.nvme_cpl.cdw0;
1483 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1484 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1485 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1486 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1487 
1488 	/* Test Case: Get Log Page to clear the log pages */
1489 	iov.iov_base = &logs[0];
1490 	iov.iov_len = sizeof(logs);
1491 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1492 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1493 
1494 	cleanup_pending_async_events(&ctrlr);
1495 }
1496 
1497 static void
1498 test_get_dif_ctx(void)
1499 {
1500 	struct spdk_nvmf_subsystem subsystem = {};
1501 	struct spdk_nvmf_request req = {};
1502 	struct spdk_nvmf_qpair qpair = {};
1503 	struct spdk_nvmf_ctrlr ctrlr = {};
1504 	struct spdk_nvmf_ns ns = {};
1505 	struct spdk_nvmf_ns *_ns = NULL;
1506 	struct spdk_bdev bdev = {};
1507 	union nvmf_h2c_msg cmd = {};
1508 	struct spdk_dif_ctx dif_ctx = {};
1509 	bool ret;
1510 
1511 	ctrlr.subsys = &subsystem;
1512 
1513 	qpair.ctrlr = &ctrlr;
1514 
1515 	req.qpair = &qpair;
1516 	req.cmd = &cmd;
1517 
1518 	ns.bdev = &bdev;
1519 
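	/*
	 * spdk_nvmf_request_get_dif_ctx() only returns true once every
	 * precondition holds: DIF insert/strip enabled, an active non-admin
	 * qpair, a non-fabrics opcode that transfers data, and a valid NSID
	 * backed by a namespace. Each case below leaves exactly one
	 * precondition unmet until the final, successful call.
	 */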
1520 	ctrlr.dif_insert_or_strip = false;
1521 
1522 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1523 	CU_ASSERT(ret == false);
1524 
1525 	ctrlr.dif_insert_or_strip = true;
1526 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1527 
1528 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1529 	CU_ASSERT(ret == false);
1530 
1531 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1532 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1533 
1534 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1535 	CU_ASSERT(ret == false);
1536 
1537 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1538 
1539 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1540 	CU_ASSERT(ret == false);
1541 
1542 	qpair.qid = 1;
1543 
1544 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1545 	CU_ASSERT(ret == false);
1546 
1547 	cmd.nvme_cmd.nsid = 1;
1548 
1549 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1550 	CU_ASSERT(ret == false);
1551 
1552 	subsystem.max_nsid = 1;
1553 	subsystem.ns = &_ns;
1554 	subsystem.ns[0] = &ns;
1555 
1556 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1557 	CU_ASSERT(ret == false);
1558 
1559 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1560 
1561 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1562 	CU_ASSERT(ret == true);
1563 }
1564 
1565 static void
1566 test_identify_ctrlr(void)
1567 {
1568 	struct spdk_nvmf_tgt tgt = {};
1569 	struct spdk_nvmf_subsystem subsystem = {
1570 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1571 		.tgt = &tgt,
1572 	};
1573 	struct spdk_nvmf_transport_ops tops = {};
1574 	struct spdk_nvmf_transport transport = {
1575 		.ops = &tops,
1576 		.opts = {
1577 			.in_capsule_data_size = 4096,
1578 		},
1579 	};
1580 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1581 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1582 	struct spdk_nvme_ctrlr_data cdata = {};
1583 	uint32_t expected_ioccsz;
1584 
1585 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1586 
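	/*
	 * ioccsz is reported in 16-byte units: the 64-byte SQE contributes
	 * sizeof(struct spdk_nvme_cmd) / 16 = 4 units and the in-capsule
	 * data adds 4096 / 16 = 256, for an expected value of 260.
	 */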
1587 	/* Check ioccsz, TCP transport */
1588 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1589 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1590 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1591 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1592 
1593 	/* Check ioccsz, RDMA transport */
1594 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1595 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1596 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1597 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1598 
1599 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1600 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1601 	ctrlr.dif_insert_or_strip = true;
1602 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1603 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1604 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1605 }
1606 
1607 static int
1608 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1609 {
1610 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1611 
1612 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1613 }
1614 
1615 static void
1616 test_custom_admin_cmd(void)
1617 {
1618 	struct spdk_nvmf_subsystem subsystem;
1619 	struct spdk_nvmf_qpair qpair;
1620 	struct spdk_nvmf_ctrlr ctrlr;
1621 	struct spdk_nvmf_request req;
1622 	struct spdk_nvmf_ns *ns_ptrs[1];
1623 	struct spdk_nvmf_ns ns;
1624 	union nvmf_h2c_msg cmd;
1625 	union nvmf_c2h_msg rsp;
1626 	struct spdk_bdev bdev;
1627 	uint8_t buf[4096];
1628 	int rc;
1629 
1630 	memset(&subsystem, 0, sizeof(subsystem));
1631 	ns_ptrs[0] = &ns;
1632 	subsystem.ns = ns_ptrs;
1633 	subsystem.max_nsid = 1;
1634 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1635 
1636 	memset(&ns, 0, sizeof(ns));
1637 	ns.opts.nsid = 1;
1638 	ns.bdev = &bdev;
1639 
1640 	memset(&qpair, 0, sizeof(qpair));
1641 	qpair.ctrlr = &ctrlr;
1642 
1643 	memset(&ctrlr, 0, sizeof(ctrlr));
1644 	ctrlr.subsys = &subsystem;
1645 	ctrlr.vcprop.cc.bits.en = 1;
1646 	ctrlr.thread = spdk_get_thread();
1647 
1648 	memset(&req, 0, sizeof(req));
1649 	req.qpair = &qpair;
1650 	req.cmd = &cmd;
1651 	req.rsp = &rsp;
1652 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1653 	req.data = buf;
1654 	req.length = sizeof(buf);
1655 
1656 	memset(&cmd, 0, sizeof(cmd));
1657 	cmd.nvme_cmd.opc = 0xc1;
1658 	cmd.nvme_cmd.nsid = 0;
1659 	memset(&rsp, 0, sizeof(rsp));
1660 
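	/*
	 * Opcode 0xc1 falls in the vendor-specific admin opcode range
	 * (0xC0-0xFF); registering a custom handler routes it to
	 * custom_admin_cmd_hdlr() instead of the default admin processing.
	 */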
1661 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1662 
1663 	/* Ensure that our custom handler is called */
1664 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1665 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1666 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1667 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1668 }
1669 
1670 static void
1671 test_fused_compare_and_write(void)
1672 {
1673 	struct spdk_nvmf_request req = {};
1674 	struct spdk_nvmf_qpair qpair = {};
1675 	struct spdk_nvme_cmd cmd = {};
1676 	union nvmf_c2h_msg rsp = {};
1677 	struct spdk_nvmf_ctrlr ctrlr = {};
1678 	struct spdk_nvmf_subsystem subsystem = {};
1679 	struct spdk_nvmf_ns ns = {};
1680 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1681 	enum spdk_nvme_ana_state ana_state[1];
1682 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1683 	struct spdk_bdev bdev = {};
1684 
1685 	struct spdk_nvmf_poll_group group = {};
1686 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1687 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1688 	struct spdk_io_channel io_ch = {};
1689 
1690 	ns.bdev = &bdev;
1691 	ns.anagrpid = 1;
1692 
1693 	subsystem.id = 0;
1694 	subsystem.max_nsid = 1;
1695 	subsys_ns[0] = &ns;
1696 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1697 
1698 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1699 
1700 	/* Enable controller */
1701 	ctrlr.vcprop.cc.bits.en = 1;
1702 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1703 	ctrlr.listener = &listener;
1704 
1705 	group.num_sgroups = 1;
1706 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1707 	sgroups.num_ns = 1;
1708 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1709 	ns_info.channel = &io_ch;
1710 	sgroups.ns_info = &ns_info;
1711 	TAILQ_INIT(&sgroups.queued);
1712 	group.sgroups = &sgroups;
1713 	TAILQ_INIT(&qpair.outstanding);
1714 
1715 	qpair.ctrlr = &ctrlr;
1716 	qpair.group = &group;
1717 	qpair.qid = 1;
1718 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1719 
1720 	cmd.nsid = 1;
1721 
1722 	req.qpair = &qpair;
1723 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1724 	req.rsp = &rsp;
1725 
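	/*
	 * A fused compare-and-write arrives as two commands: the FUSE_FIRST
	 * COMPARE is held in qpair->first_fused_req until the matching
	 * FUSE_SECOND WRITE arrives; any other pairing must be rejected.
	 */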
1726 	/* SUCCESS/SUCCESS */
1727 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1728 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1729 
1730 	spdk_nvmf_request_exec(&req);
1731 	CU_ASSERT(qpair.first_fused_req != NULL);
1732 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1733 
1734 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1735 	cmd.opc = SPDK_NVME_OPC_WRITE;
1736 
1737 	spdk_nvmf_request_exec(&req);
1738 	CU_ASSERT(qpair.first_fused_req == NULL);
1739 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1740 
1741 	/* Wrong sequence: FUSE_SECOND arrives without a preceding FUSE_FIRST */
1742 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1743 	cmd.opc = SPDK_NVME_OPC_WRITE;
1744 
1745 	spdk_nvmf_request_exec(&req);
1746 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1747 	CU_ASSERT(qpair.first_fused_req == NULL);
1748 
1749 	/* Write as FUSE_FIRST (Wrong op code) */
1750 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1751 	cmd.opc = SPDK_NVME_OPC_WRITE;
1752 
1753 	spdk_nvmf_request_exec(&req);
1754 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1755 	CU_ASSERT(qpair.first_fused_req == NULL);
1756 
1757 	/* Compare as FUSE_SECOND (Wrong op code) */
1758 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1759 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1760 
1761 	spdk_nvmf_request_exec(&req);
1762 	CU_ASSERT(qpair.first_fused_req != NULL);
1763 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1764 
1765 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1766 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1767 
1768 	spdk_nvmf_request_exec(&req);
1769 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1770 	CU_ASSERT(qpair.first_fused_req == NULL);
1771 }
1772 
1773 static void
1774 test_multi_async_event_reqs(void)
1775 {
1776 	struct spdk_nvmf_subsystem subsystem = {};
1777 	struct spdk_nvmf_qpair qpair = {};
1778 	struct spdk_nvmf_ctrlr ctrlr = {};
1779 	struct spdk_nvmf_request req[5] = {};
1780 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1781 	struct spdk_nvmf_ns ns = {};
1782 	union nvmf_h2c_msg cmd[5] = {};
1783 	union nvmf_c2h_msg rsp[5] = {};
1784 
1785 	struct spdk_nvmf_poll_group group = {};
1786 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1787 
1788 	int i;
1789 
1790 	ns_ptrs[0] = &ns;
1791 	subsystem.ns = ns_ptrs;
1792 	subsystem.max_nsid = 1;
1793 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1794 
1795 	ns.opts.nsid = 1;
1796 	group.sgroups = &sgroups;
1797 
1798 	qpair.ctrlr = &ctrlr;
1799 	qpair.group = &group;
1800 	TAILQ_INIT(&qpair.outstanding);
1801 
1802 	ctrlr.subsys = &subsystem;
1803 	ctrlr.vcprop.cc.bits.en = 1;
1804 	ctrlr.thread = spdk_get_thread();
1805 
1806 	for (i = 0; i < 5; i++) {
1807 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1808 		cmd[i].nvme_cmd.nsid = 1;
1809 		cmd[i].nvme_cmd.cid = i;
1810 
1811 		req[i].qpair = &qpair;
1812 		req[i].cmd = &cmd[i];
1813 		req[i].rsp = &rsp[i];
1814 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1815 	}
1816 
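	/*
	 * The test allocates five requests, so NVMF_MAX_ASYNC_EVENTS is
	 * expected to be 4 here; the fifth AER must fail with the AER limit
	 * exceeded status.
	 */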
1817 	/* Target can store NVMF_MAX_ASYNC_EVENTS reqs */
1818 	sgroups.mgmt_io_outstanding = NVMF_MAX_ASYNC_EVENTS;
1819 	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
1820 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1821 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
1822 	}
1823 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1824 
1825 	/* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
1826 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1827 	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
1828 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1829 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
1830 
1831 	/* Test that the aer_reqs array stays contiguous when a request in the middle is aborted */
1832 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
1833 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1834 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1835 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
1836 
1837 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
1838 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1839 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1840 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
1841 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
1842 
1843 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
1844 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
1845 }
1846 
1847 static void
1848 test_get_ana_log_page_one_ns_per_anagrp(void)
1849 {
1850 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
1851 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
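	/*
	 * Each ANA group holds exactly one namespace in this test, so every
	 * descriptor is a fixed-size header plus a single NSID entry.
	 */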
1852 	uint32_t ana_group[3];
1853 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
1854 	struct spdk_nvmf_ctrlr ctrlr = {};
1855 	enum spdk_nvme_ana_state ana_state[3];
1856 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1857 	struct spdk_nvmf_ns ns[3];
1858 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
1859 	uint64_t offset;
1860 	uint32_t length;
1861 	int i;
1862 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1863 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1864 	struct iovec iov, iovs[2];
1865 	struct spdk_nvme_ana_page *ana_hdr;
1866 	char _ana_desc[UT_ANA_DESC_SIZE];
1867 	struct spdk_nvme_ana_group_descriptor *ana_desc;
1868 
1869 	subsystem.ns = ns_arr;
1870 	subsystem.max_nsid = 3;
1871 	for (i = 0; i < 3; i++) {
1872 		subsystem.ana_group[i] = 1;
1873 	}
1874 	ctrlr.subsys = &subsystem;
1875 	ctrlr.listener = &listener;
1876 
1877 	for (i = 0; i < 3; i++) {
1878 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1879 	}
1880 
1881 	for (i = 0; i < 3; i++) {
1882 		ns_arr[i]->nsid = i + 1;
1883 		ns_arr[i]->anagrpid = i + 1;
1884 	}
1885 
1886 	/* create expected page */
1887 	ana_hdr = (void *)&expected_page[0];
1888 	ana_hdr->num_ana_group_desc = 3;
1889 	ana_hdr->change_count = 0;
1890 
1891 	/* The descriptor in the page may be unaligned, so build it in a local buffer and then memcpy it into place. */
1892 	ana_desc = (void *)_ana_desc;
1893 	offset = sizeof(struct spdk_nvme_ana_page);
1894 
1895 	for (i = 0; i < 3; i++) {
1896 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
1897 		ana_desc->ana_group_id = ns_arr[i]->nsid;
1898 		ana_desc->num_of_nsid = 1;
1899 		ana_desc->change_count = 0;
1900 		ana_desc->ana_state = ctrlr.listener->ana_state[i];
1901 		ana_desc->nsid[0] = ns_arr[i]->nsid;
1902 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
1903 		offset += UT_ANA_DESC_SIZE;
1904 	}
1905 
1906 	/* read entire actual log page */
1907 	offset = 0;
1908 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
1909 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
1910 		iov.iov_base = &actual_page[offset];
1911 		iov.iov_len = length;
1912 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
1913 		offset += length;
1914 	}
1915 
1916 	/* compare expected page and actual page */
1917 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1918 
1919 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
1920 	offset = 0;
1921 	iovs[0].iov_base = &actual_page[offset];
1922 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
1923 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
1924 	iovs[1].iov_base = &actual_page[offset];
1925 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
1926 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
1927 
1928 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1929 
1930 #undef UT_ANA_DESC_SIZE
1931 #undef UT_ANA_LOG_PAGE_SIZE
1932 }
1933 
1934 static void
1935 test_get_ana_log_page_multi_ns_per_anagrp(void)
1936 {
1937 #define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
1938 				 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
1939 				 sizeof(uint32_t) * 5)
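	/*
	 * Five namespaces split across two ANA groups: group 2 gets NSIDs 1,
	 * 3 and 5, group 3 gets NSIDs 2 and 4, so the two descriptors have
	 * different lengths.
	 */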
1940 	struct spdk_nvmf_ns ns[5];
1941 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
1942 	uint32_t ana_group[5] = {0};
1943 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
1944 	enum spdk_nvme_ana_state ana_state[5];
1945 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
1946 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
1947 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1948 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1949 	struct iovec iov, iovs[2];
1950 	struct spdk_nvme_ana_page *ana_hdr;
1951 	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
1952 	struct spdk_nvme_ana_group_descriptor *ana_desc;
1953 	uint64_t offset;
1954 	uint32_t length;
1955 	int i;
1956 
1957 	subsystem.max_nsid = 5;
1958 	subsystem.ana_group[1] = 3;
1959 	subsystem.ana_group[2] = 2;
1960 	for (i = 0; i < 5; i++) {
1961 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1962 	}
1963 
1964 	for (i = 0; i < 5; i++) {
1965 		ns_arr[i]->nsid = i + 1;
1966 	}
1967 	ns_arr[0]->anagrpid = 2;
1968 	ns_arr[1]->anagrpid = 3;
1969 	ns_arr[2]->anagrpid = 2;
1970 	ns_arr[3]->anagrpid = 3;
1971 	ns_arr[4]->anagrpid = 2;
1972 
1973 	/* create expected page */
1974 	ana_hdr = (void *)&expected_page[0];
1975 	ana_hdr->num_ana_group_desc = 2;
1976 	ana_hdr->change_count = 0;
1977 
1978 	/* The descriptor in the page may be unaligned, so build it in a local buffer and then memcpy it into place. */
1979 	ana_desc = (void *)_ana_desc;
1980 	offset = sizeof(struct spdk_nvme_ana_page);
1981 
1982 	memset(_ana_desc, 0, sizeof(_ana_desc));
1983 	ana_desc->ana_group_id = 2;
1984 	ana_desc->num_of_nsid = 3;
1985 	ana_desc->change_count = 0;
1986 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1987 	ana_desc->nsid[0] = 1;
1988 	ana_desc->nsid[1] = 3;
1989 	ana_desc->nsid[2] = 5;
1990 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
1991 	       sizeof(uint32_t) * 3);
1992 	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;
1993 
1994 	memset(_ana_desc, 0, sizeof(_ana_desc));
1995 	ana_desc->ana_group_id = 3;
1996 	ana_desc->num_of_nsid = 2;
1997 	ana_desc->change_count = 0;
1998 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1999 	ana_desc->nsid[0] = 2;
2000 	ana_desc->nsid[1] = 4;
2001 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2002 	       sizeof(uint32_t) * 2);
2003 
2004 	/* read entire actual log page, and compare expected page and actual page. */
2005 	offset = 0;
2006 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2007 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2008 		iov.iov_base = &actual_page[offset];
2009 		iov.iov_len = length;
2010 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2011 		offset += length;
2012 	}
2013 
2014 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2015 
2016 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2017 	offset = 0;
2018 	iovs[0].iov_base = &actual_page[offset];
2019 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2020 	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2021 	iovs[1].iov_base = &actual_page[offset];
2022 	iovs[1].iov_len = sizeof(uint32_t) * 5;
2023 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2024 
2025 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2026 
2027 #undef UT_ANA_LOG_PAGE_SIZE
2028 }

2029 static void
2030 test_multi_async_events(void)
2031 {
2032 	struct spdk_nvmf_subsystem subsystem = {};
2033 	struct spdk_nvmf_qpair qpair = {};
2034 	struct spdk_nvmf_ctrlr ctrlr = {};
2035 	struct spdk_nvmf_request req[4] = {};
2036 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2037 	struct spdk_nvmf_ns ns = {};
2038 	union nvmf_h2c_msg cmd[4] = {};
2039 	union nvmf_c2h_msg rsp[4] = {};
2040 	union spdk_nvme_async_event_completion event = {};
2041 	struct spdk_nvmf_poll_group group = {};
2042 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2043 	int i;
2044 
2045 	ns_ptrs[0] = &ns;
2046 	subsystem.ns = ns_ptrs;
2047 	subsystem.max_nsid = 1;
2048 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2049 
2050 	ns.opts.nsid = 1;
2051 	group.sgroups = &sgroups;
2052 
2053 	qpair.ctrlr = &ctrlr;
2054 	qpair.group = &group;
2055 	TAILQ_INIT(&qpair.outstanding);
2056 
2057 	ctrlr.subsys = &subsystem;
2058 	ctrlr.vcprop.cc.bits.en = 1;
2059 	ctrlr.thread = spdk_get_thread();
2060 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2061 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2062 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2063 	init_pending_async_events(&ctrlr);
2064 
2065 	/* The target queues pending events when there is no outstanding AER request */
2066 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2067 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2068 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2069 
2070 	for (i = 0; i < 4; i++) {
2071 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2072 		cmd[i].nvme_cmd.nsid = 1;
2073 		cmd[i].nvme_cmd.cid = i;
2074 
2075 		req[i].qpair = &qpair;
2076 		req[i].cmd = &cmd[i];
2077 		req[i].rsp = &rsp[i];
2078 
2079 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2080 
2081 		sgroups.mgmt_io_outstanding = 1;
2082 		if (i < 3) {
2083 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2084 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2085 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2086 		} else {
2087 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2088 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2089 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2090 		}
2091 	}
2092 
2093 	event.raw = rsp[0].nvme_cpl.cdw0;
2094 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2095 	event.raw = rsp[1].nvme_cpl.cdw0;
2096 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2097 	event.raw = rsp[2].nvme_cpl.cdw0;
2098 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2099 
2100 	cleanup_pending_async_events(&ctrlr);
2101 }
2102 
2103 static void
2104 test_rae(void)
2105 {
2106 	struct spdk_nvmf_subsystem subsystem = {};
2107 	struct spdk_nvmf_qpair qpair = {};
2108 	struct spdk_nvmf_ctrlr ctrlr = {};
2109 	struct spdk_nvmf_request req[3] = {};
2110 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2111 	struct spdk_nvmf_ns ns = {};
2112 	union nvmf_h2c_msg cmd[3] = {};
2113 	union nvmf_c2h_msg rsp[3] = {};
2114 	union spdk_nvme_async_event_completion event = {};
2115 	struct spdk_nvmf_poll_group group = {};
2116 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2117 	int i;
2118 	char data[4096];
2119 
2120 	ns_ptrs[0] = &ns;
2121 	subsystem.ns = ns_ptrs;
2122 	subsystem.max_nsid = 1;
2123 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2124 
2125 	ns.opts.nsid = 1;
2126 	group.sgroups = &sgroups;
2127 
2128 	qpair.ctrlr = &ctrlr;
2129 	qpair.group = &group;
2130 	TAILQ_INIT(&qpair.outstanding);
2131 
2132 	ctrlr.subsys = &subsystem;
2133 	ctrlr.vcprop.cc.bits.en = 1;
2134 	ctrlr.thread = spdk_get_thread();
2135 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2136 	init_pending_async_events(&ctrlr);
2137 
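	/*
	 * RAE (Retain Asynchronous Event) semantics: a Get Log Page with
	 * RAE=1 leaves the event outstanding, so duplicate notices are
	 * suppressed; only a Get Log Page with RAE=0 clears it and re-arms
	 * event generation.
	 */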
2138 	/* The target queues pending events when there is no outstanding AER request */
2139 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2140 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2141 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2142 	/* Only one event of a given type is queued until RAE is cleared */
2143 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2144 
2145 	req[0].qpair = &qpair;
2146 	req[0].cmd = &cmd[0];
2147 	req[0].rsp = &rsp[0];
2148 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2149 	cmd[0].nvme_cmd.nsid = 1;
2150 	cmd[0].nvme_cmd.cid = 0;
2151 
2152 	for (i = 1; i < 3; i++) {
2153 		req[i].qpair = &qpair;
2154 		req[i].cmd = &cmd[i];
2155 		req[i].rsp = &rsp[i];
2156 		req[i].data = &data;
2157 		req[i].length = sizeof(data);
2158 
2159 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2160 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2161 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2162 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2163 			spdk_nvme_bytes_to_numd(req[i].length);
2164 		cmd[i].nvme_cmd.cid = i;
2165 	}
2166 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2167 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
2168 
2169 	/* consume the pending event */
2170 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2171 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2172 	event.raw = rsp[0].nvme_cpl.cdw0;
2173 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2174 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2175 
2176 	/* get log with RAE set */
2177 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2178 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2179 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2180 
2181 	/* No new event is generated until RAE is cleared */
2182 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2183 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2184 
2185 	/* get log with RAE clear */
2186 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2187 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2188 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2189 
2190 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2191 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2192 
2193 	cleanup_pending_async_events(&ctrlr);
2194 }
2195 
2196 static void
2197 test_nvmf_ctrlr_create_destruct(void)
2198 {
2199 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2200 	struct spdk_nvmf_poll_group group = {};
2201 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2202 	struct spdk_nvmf_transport transport = {};
2203 	struct spdk_nvmf_transport_ops tops = {};
2204 	struct spdk_nvmf_subsystem subsystem = {};
2205 	struct spdk_nvmf_request req = {};
2206 	struct spdk_nvmf_qpair qpair = {};
2207 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2208 	struct spdk_nvmf_tgt tgt = {};
2209 	union nvmf_h2c_msg cmd = {};
2210 	union nvmf_c2h_msg rsp = {};
2211 	const uint8_t hostid[16] = {
2212 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2213 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2214 	};
2215 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2216 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2217 
2218 	group.thread = spdk_get_thread();
2219 	transport.ops = &tops;
2220 	transport.opts.max_aq_depth = 32;
2221 	transport.opts.max_queue_depth = 64;
2222 	transport.opts.max_qpairs_per_ctrlr = 3;
2223 	transport.opts.dif_insert_or_strip = true;
2224 	transport.tgt = &tgt;
2225 	qpair.transport = &transport;
2226 	qpair.group = &group;
2227 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2228 	TAILQ_INIT(&qpair.outstanding);
2229 
2230 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2231 	connect_data.cntlid = 0xFFFF;
2232 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2233 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2234 
2235 	subsystem.thread = spdk_get_thread();
2236 	subsystem.id = 1;
2237 	TAILQ_INIT(&subsystem.ctrlrs);
2238 	subsystem.tgt = &tgt;
2239 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2240 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2241 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2242 
2243 	group.sgroups = sgroups;
2244 
2245 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2246 	cmd.connect_cmd.cid = 1;
2247 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2248 	cmd.connect_cmd.recfmt = 0;
2249 	cmd.connect_cmd.qid = 0;
2250 	cmd.connect_cmd.sqsize = 31;
2251 	cmd.connect_cmd.cattr = 0;
2252 	cmd.connect_cmd.kato = 120000;
2253 
2254 	req.qpair = &qpair;
2255 	req.length = sizeof(connect_data);
2256 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2257 	req.data = &connect_data;
2258 	req.cmd = &cmd;
2259 	req.rsp = &rsp;
2260 
2261 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2262 	sgroups[subsystem.id].mgmt_io_outstanding++;
2263 
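	/*
	 * nvmf_ctrlr_create() finishes asynchronously on the subsystem
	 * thread, so poll_threads() must run before the result is checked.
	 * The assertions below follow from the setup above: mqes is
	 * zero-based (max_queue_depth 64 -> 63), ncqr/nsqr are zero-based
	 * counts of the 2 I/O qpairs (max_qpairs_per_ctrlr 3 minus the admin
	 * queue), and kato echoes the connect command.
	 */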
2264 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.data);
2265 	poll_threads();
2266 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2267 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2268 	CU_ASSERT(ctrlr->subsys == &subsystem);
2269 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2270 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2271 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2272 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2273 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2274 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2275 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2276 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
2277 	CU_ASSERT(!strncmp((void *)&ctrlr->hostid, hostid, 16));
2278 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2279 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2280 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2281 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
2282 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2283 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2284 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2285 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2286 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2287 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2288 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2289 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2290 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2291 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2292 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2293 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2294 
2295 	ctrlr->in_destruct = true;
2296 	nvmf_ctrlr_destruct(ctrlr);
2297 	poll_threads();
2298 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2299 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2300 }
2301 
2302 static void
2303 test_nvmf_ctrlr_use_zcopy(void)
2304 {
2305 	struct spdk_nvmf_subsystem subsystem = {};
2306 	struct spdk_nvmf_transport transport = {};
2307 	struct spdk_nvmf_request req = {};
2308 	struct spdk_nvmf_qpair qpair = {};
2309 	struct spdk_nvmf_ctrlr ctrlr = {};
2310 	union nvmf_h2c_msg cmd = {};
2311 	struct spdk_nvmf_ns ns = {};
2312 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2313 	struct spdk_bdev bdev = {};
2314 	struct spdk_nvmf_poll_group group = {};
2315 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2316 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2317 	struct spdk_io_channel io_ch = {};
2318 	int opc;
2319 
2320 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2321 	ns.bdev = &bdev;
2322 
2323 	subsystem.id = 0;
2324 	subsystem.max_nsid = 1;
2325 	subsys_ns[0] = &ns;
2326 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2327 
2328 	ctrlr.subsys = &subsystem;
2329 
2330 	transport.opts.zcopy = true;
2331 
2332 	qpair.ctrlr = &ctrlr;
2333 	qpair.group = &group;
2334 	qpair.qid = 1;
2335 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2336 	qpair.transport = &transport;
2337 
2338 	group.thread = spdk_get_thread();
2339 	group.num_sgroups = 1;
2340 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2341 	sgroups.num_ns = 1;
2342 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2343 	ns_info.channel = &io_ch;
2344 	sgroups.ns_info = &ns_info;
2345 	TAILQ_INIT(&sgroups.queued);
2346 	group.sgroups = &sgroups;
2347 	TAILQ_INIT(&qpair.outstanding);
2348 
2349 	req.qpair = &qpair;
2350 	req.cmd = &cmd;
2351 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2352 
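	/*
	 * nvmf_ctrlr_use_zcopy() requires an I/O qpair, a plain (non-fused)
	 * READ or WRITE, a valid NSID whose bdev supports zcopy, and zcopy
	 * enabled in the transport opts. Each case below violates exactly
	 * one requirement.
	 */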
2353 	/* Admin queue */
2354 	qpair.qid = 0;
2355 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2356 	qpair.qid = 1;
2357 
2358 	/* Invalid Opcodes */
2359 	for (opc = 0; opc <= 255; opc++) {
2360 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2361 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2362 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2363 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2364 		}
2365 	}
2366 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2367 
2368 	/* Fused WRITE */
2369 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2370 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2371 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2372 
2373 	/* Invalid NSID: no namespace (and thus no bdev) behind nsid 4 */
2374 	cmd.nvme_cmd.nsid = 4;
2375 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2376 	cmd.nvme_cmd.nsid = 1;
2377 
2378 	/* ZCOPY not supported by the namespace (ns.zcopy is false) */
2379 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2380 	ns.zcopy = true;
2381 
2382 	/* ZCOPY disabled on transport level */
2383 	transport.opts.zcopy = false;
2384 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2385 	transport.opts.zcopy = true;
2386 
2387 	/* Success */
2388 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2389 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2390 }
2391 
2392 static void
2393 qpair_state_change_done(void *cb_arg, int status)
2394 {
2395 }
2396 
2397 static void
2398 test_spdk_nvmf_request_zcopy_start(void)
2399 {
2400 	struct spdk_nvmf_request req = {};
2401 	struct spdk_nvmf_qpair qpair = {};
2402 	struct spdk_nvmf_transport transport = {};
2403 	struct spdk_nvme_cmd cmd = {};
2404 	union nvmf_c2h_msg rsp = {};
2405 	struct spdk_nvmf_ctrlr ctrlr = {};
2406 	struct spdk_nvmf_subsystem subsystem = {};
2407 	struct spdk_nvmf_ns ns = {};
2408 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2409 	enum spdk_nvme_ana_state ana_state[1];
2410 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2411 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2412 
2413 	struct spdk_nvmf_poll_group group = {};
2414 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2415 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2416 	struct spdk_io_channel io_ch = {};
2417 
2418 	ns.bdev = &bdev;
2419 	ns.zcopy = true;
2420 	ns.anagrpid = 1;
2421 
2422 	subsystem.id = 0;
2423 	subsystem.max_nsid = 1;
2424 	subsys_ns[0] = &ns;
2425 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2426 
2427 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2428 
2429 	/* Enable controller */
2430 	ctrlr.vcprop.cc.bits.en = 1;
2431 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2432 	ctrlr.listener = &listener;
2433 
2434 	transport.opts.zcopy = true;
2435 
2436 	group.thread = spdk_get_thread();
2437 	group.num_sgroups = 1;
2438 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2439 	sgroups.num_ns = 1;
2440 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2441 	ns_info.channel = &io_ch;
2442 	sgroups.ns_info = &ns_info;
2443 	TAILQ_INIT(&sgroups.queued);
2444 	group.sgroups = &sgroups;
2445 	TAILQ_INIT(&qpair.outstanding);
2446 
2447 	qpair.ctrlr = &ctrlr;
2448 	qpair.group = &group;
2449 	qpair.transport = &transport;
2450 	qpair.qid = 1;
2451 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2452 
2453 	cmd.nsid = 1;
2454 
2455 	req.qpair = &qpair;
2456 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2457 	req.rsp = &rsp;
2458 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2459 	cmd.opc = SPDK_NVME_OPC_READ;
2460 
2461 	/* Fail because no controller */
2462 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2463 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2464 	qpair.ctrlr = NULL;
2465 	spdk_nvmf_request_zcopy_start(&req);
2466 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2467 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2468 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
2469 	qpair.ctrlr = &ctrlr;
2470 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2471 
2472 	/* Fail because bad NSID */
2473 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2474 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2475 	cmd.nsid = 0;
2476 	spdk_nvmf_request_zcopy_start(&req);
2477 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2478 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2479 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2480 	cmd.nsid = 1;
2481 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2482 
2483 	/* Fail because bad Channel */
2484 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2485 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2486 	ns_info.channel = NULL;
2487 	spdk_nvmf_request_zcopy_start(&req);
2488 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2489 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2490 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2491 	ns_info.channel = &io_ch;
2492 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2493 
2494 	/* Queue the request because the NSID is not active */
2495 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2496 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2497 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2498 	spdk_nvmf_request_zcopy_start(&req);
2499 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
2500 	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
2501 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2502 	TAILQ_REMOVE(&sgroups.queued, &req, link);
2503 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2504 
2505 	/* Fail because QPair is not active */
2506 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2507 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2508 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2509 	qpair.state_cb = qpair_state_change_done;
2510 	spdk_nvmf_request_zcopy_start(&req);
2511 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
2512 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2513 	qpair.state_cb = NULL;
2514 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2515 
2516 	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails: the start LBA equals blockcnt, so the range runs past the end of the bdev */
2517 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2518 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2519 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2520 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2521 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
2522 	spdk_nvmf_request_zcopy_start(&req);
2523 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2524 	cmd.cdw10 = 0;
2525 	cmd.cdw12 = 0;
2526 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2527 
2528 	/* Success */
2529 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2530 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2531 	spdk_nvmf_request_zcopy_start(&req);
2532 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2533 }
2534 
2535 static void
2536 test_zcopy_read(void)
2537 {
2538 	struct spdk_nvmf_request req = {};
2539 	struct spdk_nvmf_qpair qpair = {};
2540 	struct spdk_nvmf_transport transport = {};
2541 	struct spdk_nvme_cmd cmd = {};
2542 	union nvmf_c2h_msg rsp = {};
2543 	struct spdk_nvmf_ctrlr ctrlr = {};
2544 	struct spdk_nvmf_subsystem subsystem = {};
2545 	struct spdk_nvmf_ns ns = {};
2546 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2547 	enum spdk_nvme_ana_state ana_state[1];
2548 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2549 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2550 
2551 	struct spdk_nvmf_poll_group group = {};
2552 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2553 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2554 	struct spdk_io_channel io_ch = {};
2555 
2556 	ns.bdev = &bdev;
2557 	ns.zcopy = true;
2558 	ns.anagrpid = 1;
2559 
2560 	subsystem.id = 0;
2561 	subsystem.max_nsid = 1;
2562 	subsys_ns[0] = &ns;
2563 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2564 
2565 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2566 
2567 	/* Enable controller */
2568 	ctrlr.vcprop.cc.bits.en = 1;
2569 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2570 	ctrlr.listener = &listener;
2571 
2572 	transport.opts.zcopy = true;
2573 
2574 	group.thread = spdk_get_thread();
2575 	group.num_sgroups = 1;
2576 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2577 	sgroups.num_ns = 1;
2578 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2579 	ns_info.channel = &io_ch;
2580 	sgroups.ns_info = &ns_info;
2581 	TAILQ_INIT(&sgroups.queued);
2582 	group.sgroups = &sgroups;
2583 	TAILQ_INIT(&qpair.outstanding);
2584 
2585 	qpair.ctrlr = &ctrlr;
2586 	qpair.group = &group;
2587 	qpair.transport = &transport;
2588 	qpair.qid = 1;
2589 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2590 
2591 	cmd.nsid = 1;
2592 
2593 	req.qpair = &qpair;
2594 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2595 	req.rsp = &rsp;
2596 	cmd.opc = SPDK_NVME_OPC_READ;
2597 
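	/*
	 * A zcopy request moves through NONE -> INIT (nvmf_ctrlr_use_zcopy)
	 * -> EXECUTE (zcopy start) -> COMPLETE (zcopy end); it stays on the
	 * outstanding queue and holds an ns_info reference until the end
	 * call.
	 */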
2598 	/* Prepare for zcopy */
2599 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2600 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2601 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2602 	CU_ASSERT(ns_info.io_outstanding == 0);
2603 
2604 	/* Perform the zcopy start */
2605 	spdk_nvmf_request_zcopy_start(&req);
2606 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2607 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2608 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2609 	CU_ASSERT(ns_info.io_outstanding == 1);
2610 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2611 
2612 	/* Perform the zcopy end; commit is false for a read */
2613 	spdk_nvmf_request_zcopy_end(&req, false);
2614 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2615 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2616 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2617 	CU_ASSERT(ns_info.io_outstanding == 0);
2618 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2619 }
2620 
2621 static void
2622 test_zcopy_write(void)
2623 {
2624 	struct spdk_nvmf_request req = {};
2625 	struct spdk_nvmf_qpair qpair = {};
2626 	struct spdk_nvmf_transport transport = {};
2627 	struct spdk_nvme_cmd cmd = {};
2628 	union nvmf_c2h_msg rsp = {};
2629 	struct spdk_nvmf_ctrlr ctrlr = {};
2630 	struct spdk_nvmf_subsystem subsystem = {};
2631 	struct spdk_nvmf_ns ns = {};
2632 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2633 	enum spdk_nvme_ana_state ana_state[1];
2634 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2635 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2636 
2637 	struct spdk_nvmf_poll_group group = {};
2638 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2639 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2640 	struct spdk_io_channel io_ch = {};
2641 
2642 	ns.bdev = &bdev;
2643 	ns.zcopy = true;
2644 	ns.anagrpid = 1;
2645 
2646 	subsystem.id = 0;
2647 	subsystem.max_nsid = 1;
2648 	subsys_ns[0] = &ns;
2649 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2650 
2651 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2652 
2653 	/* Enable controller */
2654 	ctrlr.vcprop.cc.bits.en = 1;
2655 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2656 	ctrlr.listener = &listener;
2657 
2658 	transport.opts.zcopy = true;
2659 
2660 	group.thread = spdk_get_thread();
2661 	group.num_sgroups = 1;
2662 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2663 	sgroups.num_ns = 1;
2664 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2665 	ns_info.channel = &io_ch;
2666 	sgroups.ns_info = &ns_info;
2667 	TAILQ_INIT(&sgroups.queued);
2668 	group.sgroups = &sgroups;
2669 	TAILQ_INIT(&qpair.outstanding);
2670 
2671 	qpair.ctrlr = &ctrlr;
2672 	qpair.group = &group;
2673 	qpair.transport = &transport;
2674 	qpair.qid = 1;
2675 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2676 
2677 	cmd.nsid = 1;
2678 
2679 	req.qpair = &qpair;
2680 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2681 	req.rsp = &rsp;
2682 	cmd.opc = SPDK_NVME_OPC_WRITE;
2683 
2684 	/* Prepare for zcopy */
2685 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2686 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2687 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2688 	CU_ASSERT(ns_info.io_outstanding == 0);
2689 
2690 	/* Perform the zcopy start */
2691 	spdk_nvmf_request_zcopy_start(&req);
2692 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2693 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2694 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2695 	CU_ASSERT(ns_info.io_outstanding == 1);
2696 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2697 
2698 	/* Perform the zcopy end, committing the written data */
2699 	spdk_nvmf_request_zcopy_end(&req, true);
2700 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2701 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2702 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2703 	CU_ASSERT(ns_info.io_outstanding == 0);
2704 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2705 }
2706 
2707 static void
2708 test_nvmf_property_set(void)
2709 {
2710 	int rc;
2711 	struct spdk_nvmf_request req = {};
2712 	struct spdk_nvmf_qpair qpair = {};
2713 	struct spdk_nvmf_ctrlr ctrlr = {};
2714 	union nvmf_h2c_msg cmd = {};
2715 	union nvmf_c2h_msg rsp = {};
2716 
2717 	req.qpair = &qpair;
2718 	qpair.ctrlr = &ctrlr;
2719 	req.cmd = &cmd;
2720 	req.rsp = &rsp;
2721 
2722 	/* Invalid parameters: vs is read-only so a set fails, and intms is not a supported fabrics property so a get fails */
2723 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2724 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
2725 
2726 	rc = nvmf_property_set(&req);
2727 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2728 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2729 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2730 
2731 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);
2732 
2733 	rc = nvmf_property_get(&req);
2734 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2735 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2736 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2737 
2738 	/* Set cc with same property size */
2739 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2740 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);
2741 
2742 	rc = nvmf_property_set(&req);
2743 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2744 
2745 	/* Emulate cc data */
2746 	ctrlr.vcprop.cc.raw = 0xDEADBEEF;
2747 
2748 	rc = nvmf_property_get(&req);
2749 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2750 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);
2751 
2752 	/* Set asq (an 8-byte property) with a 4-byte property size */
2753 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2754 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2755 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);
2756 
2757 	rc = nvmf_property_set(&req);
2758 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2759 
2760 	/* Emulate asq data */
2761 	ctrlr.vcprop.asq = 0xAADDADBEEF;
2762 
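	/*
	 * A 4-byte get of the 8-byte ASQ property returns only the lower
	 * dword of 0xAADDADBEEF, i.e. 0xDDADBEEF.
	 */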
2763 	rc = nvmf_property_get(&req);
2764 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2765 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
2766 }
2767 
2768 int main(int argc, char **argv)
2769 {
2770 	CU_pSuite	suite = NULL;
2771 	unsigned int	num_failures;
2772 
2773 	CU_set_error_action(CUEA_ABORT);
2774 	CU_initialize_registry();
2775 
2776 	suite = CU_add_suite("nvmf", NULL, NULL);
2777 	CU_ADD_TEST(suite, test_get_log_page);
2778 	CU_ADD_TEST(suite, test_process_fabrics_cmd);
2779 	CU_ADD_TEST(suite, test_connect);
2780 	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
2781 	CU_ADD_TEST(suite, test_identify_ns);
2782 	CU_ADD_TEST(suite, test_reservation_write_exclusive);
2783 	CU_ADD_TEST(suite, test_reservation_exclusive_access);
2784 	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
2785 	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
2786 	CU_ADD_TEST(suite, test_reservation_notification_log_page);
2787 	CU_ADD_TEST(suite, test_get_dif_ctx);
2788 	CU_ADD_TEST(suite, test_set_get_features);
2789 	CU_ADD_TEST(suite, test_identify_ctrlr);
2790 	CU_ADD_TEST(suite, test_custom_admin_cmd);
2791 	CU_ADD_TEST(suite, test_fused_compare_and_write);
2792 	CU_ADD_TEST(suite, test_multi_async_event_reqs);
2793 	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
2794 	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
2795 	CU_ADD_TEST(suite, test_multi_async_events);
2796 	CU_ADD_TEST(suite, test_rae);
2797 	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
2798 	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
2799 	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
2800 	CU_ADD_TEST(suite, test_zcopy_read);
2801 	CU_ADD_TEST(suite, test_zcopy_write);
2802 	CU_ADD_TEST(suite, test_nvmf_property_set);
2803 
2804 	allocate_threads(1);
2805 	set_thread(0);
2806 
2807 	CU_basic_set_mode(CU_BRM_VERBOSE);
2808 	CU_basic_run_tests();
2809 	num_failures = CU_get_number_of_failures();
2810 	CU_cleanup_registry();
2811 
2812 	free_threads();
2813 
2814 	return num_failures;
2815 }
2816