xref: /spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c (revision f914cd2e166f37c6b59739c14a6391e3c53feaac)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/bdev_zone.h"
8 #include "spdk/nvme_spec.h"
9 #include "spdk/stdinc.h"
10 
11 #include "spdk_internal/cunit.h"
12 #include "spdk_internal/mock.h"
13 #include "thread/thread_internal.h"
14 
15 #include "common/lib/ut_multithread.c"
16 #include "nvmf/ctrlr.c"
17 
18 SPDK_LOG_REGISTER_COMPONENT(nvmf)
19 
20 struct spdk_bdev {
21 	int ut_mock;
22 	uint64_t blockcnt;
23 	uint32_t blocklen;
24 	bool zoned;
25 	uint32_t zone_size;
26 	uint32_t max_open_zones;
27 	uint32_t max_active_zones;
28 };
29 
30 #define MAX_OPEN_ZONES 12
31 #define MAX_ACTIVE_ZONES 34
32 #define ZONE_SIZE 56
33 
34 const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
35 const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";
36 
37 static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
38 static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
39 		0x8877665544332211UL;
40 static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;
41 
42 DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
43 	    struct spdk_nvmf_subsystem *,
44 	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
45 	    NULL);
46 
47 DEFINE_STUB(spdk_nvmf_poll_group_create,
48 	    struct spdk_nvmf_poll_group *,
49 	    (struct spdk_nvmf_tgt *tgt),
50 	    NULL);
51 
52 DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
53 	    const char *,
54 	    (const struct spdk_nvmf_subsystem *subsystem),
55 	    subsystem_default_sn);
56 
57 DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
58 	    const char *,
59 	    (const struct spdk_nvmf_subsystem *subsystem),
60 	    subsystem_default_mn);
61 
62 DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
63 	    bool,
64 	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
65 	    true);
66 
67 DEFINE_STUB(nvmf_subsystem_add_ctrlr,
68 	    int,
69 	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
70 	    0);
71 
72 DEFINE_STUB(nvmf_subsystem_get_ctrlr,
73 	    struct spdk_nvmf_ctrlr *,
74 	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
75 	    NULL);
76 
77 DEFINE_STUB(nvmf_ctrlr_dsm_supported,
78 	    bool,
79 	    (struct spdk_nvmf_ctrlr *ctrlr),
80 	    false);
81 
82 DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
83 	    bool,
84 	    (struct spdk_nvmf_ctrlr *ctrlr),
85 	    false);
86 
87 DEFINE_STUB(nvmf_ctrlr_copy_supported,
88 	    bool,
89 	    (struct spdk_nvmf_ctrlr *ctrlr),
90 	    false);
91 
92 DEFINE_STUB_V(nvmf_get_discovery_log_page,
93 	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
94 	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));
95 
96 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
97 	    int,
98 	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
99 	    0);
100 
101 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
102 	    bool,
103 	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
104 	    true);
105 
106 DEFINE_STUB(nvmf_subsystem_find_listener,
107 	    struct spdk_nvmf_subsystem_listener *,
108 	    (struct spdk_nvmf_subsystem *subsystem,
109 	     const struct spdk_nvme_transport_id *trid),
110 	    (void *)0x1);
111 
112 DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
113 	    int,
114 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
115 	     struct spdk_nvmf_request *req),
116 	    0);
117 
118 DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
119 	    int,
120 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
121 	     struct spdk_nvmf_request *req),
122 	    0);
123 
124 DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
125 	    int,
126 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
127 	     struct spdk_nvmf_request *req),
128 	    0);
129 
130 DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
131 	    int,
132 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
133 	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
134 	    0);
135 
136 DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
137 	    int,
138 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
139 	     struct spdk_nvmf_request *req),
140 	    0);
141 
142 DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
143 	    int,
144 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
145 	     struct spdk_nvmf_request *req),
146 	    0);
147 
148 DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
149 	    int,
150 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
151 	     struct spdk_nvmf_request *req),
152 	    0);
153 
154 DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
155 	    int,
156 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
157 	     struct spdk_nvmf_request *req),
158 	    0);
159 
160 DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
161 	    int,
162 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
163 	     struct spdk_nvmf_request *req),
164 	    0);
165 
166 DEFINE_STUB(nvmf_transport_req_complete,
167 	    int,
168 	    (struct spdk_nvmf_request *req),
169 	    0);
170 
171 DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
172 
173 DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
174 	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
175 	     struct spdk_dif_ctx *dif_ctx),
176 	    true);
177 
178 DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
179 	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
180 
181 DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
182 DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
183 
184 DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
185 		struct spdk_nvmf_ctrlr *ctrlr));
186 
187 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
188 	    int,
189 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
190 	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
191 	    0);
192 
193 DEFINE_STUB(nvmf_transport_req_free,
194 	    int,
195 	    (struct spdk_nvmf_request *req),
196 	    0);
197 
198 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
199 	    int,
200 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
201 	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
202 	    0);
203 DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
204 				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
205 DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
206 
207 DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
208 	    MAX_ACTIVE_ZONES);
209 DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
210 DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
211 DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
212 
213 DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
214 	    (const struct spdk_nvme_ns_data *nsdata), 0);
215 
216 DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
217 
218 int
219 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
220 {
221 	return 0;
222 }
223 
224 void
225 nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
226 			    bool dif_insert_or_strip)
227 {
228 	uint64_t num_blocks;
229 
230 	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
231 	num_blocks = ns->bdev->blockcnt;
232 	nsdata->nsze = num_blocks;
233 	nsdata->ncap = num_blocks;
234 	nsdata->nuse = num_blocks;
235 	nsdata->nlbaf = 0;
236 	nsdata->flbas.format = 0;
237 	nsdata->flbas.msb_format = 0;
238 	nsdata->lbaf[0].lbads = spdk_u32log2(512);
239 }
240 
241 struct spdk_nvmf_ns *
242 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
243 {
244 	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
245 	return subsystem->ns[0];
246 }
247 
248 struct spdk_nvmf_ns *
249 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
250 				struct spdk_nvmf_ns *prev_ns)
251 {
252 	uint32_t nsid;
253 
254 	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
255 	nsid = prev_ns->nsid;
256 
257 	if (nsid >= subsystem->max_nsid) {
258 		return NULL;
259 	}
260 	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
261 		if (subsystem->ns[nsid - 1]) {
262 			return subsystem->ns[nsid - 1];
263 		}
264 	}
265 	return NULL;
266 }
267 
268 bool
269 nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
270 {
271 	return true;
272 }
273 
274 int
275 nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
276 			    struct spdk_bdev_desc *desc,
277 			    struct spdk_io_channel *ch,
278 			    struct spdk_nvmf_request *req)
279 {
280 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
281 	uint64_t start_lba;
282 	uint64_t num_blocks;
283 
284 	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
285 	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;
286 
287 	if ((start_lba + num_blocks) > bdev->blockcnt) {
288 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
289 		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
290 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
291 	}
292 
293 	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
294 		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
295 	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
296 		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
297 	} else {
298 		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
299 	}
300 
301 
302 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
303 }
304 
305 void
306 nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
307 {
308 	req->zcopy_bdev_io = NULL;
309 	spdk_nvmf_request_complete(req);
310 }
311 
312 bool
313 nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
314 {
315 	return ns->ptpl_file != NULL;
316 }
317 
318 static void
319 test_get_log_page(void)
320 {
321 	struct spdk_nvmf_subsystem subsystem = {};
322 	struct spdk_nvmf_request req = {};
323 	struct spdk_nvmf_qpair qpair = {};
324 	struct spdk_nvmf_ctrlr ctrlr = {};
325 	union nvmf_h2c_msg cmd = {};
326 	union nvmf_c2h_msg rsp = {};
327 	char data[4096];
328 
329 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
330 
331 	ctrlr.subsys = &subsystem;
332 
333 	qpair.ctrlr = &ctrlr;
334 
335 	req.qpair = &qpair;
336 	req.cmd = &cmd;
337 	req.rsp = &rsp;
338 	req.length = sizeof(data);
339 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &data, req.length);
340 
341 	/* Get Log Page - all valid */
342 	memset(&cmd, 0, sizeof(cmd));
343 	memset(&rsp, 0, sizeof(rsp));
344 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
345 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
346 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
347 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
348 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
349 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
350 
351 	/* Get Log Page with invalid log ID */
352 	memset(&cmd, 0, sizeof(cmd));
353 	memset(&rsp, 0, sizeof(rsp));
354 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
355 	cmd.nvme_cmd.cdw10 = 0;
356 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
357 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
358 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
359 
360 	/* Get Log Page with invalid offset (not dword aligned) */
361 	memset(&cmd, 0, sizeof(cmd));
362 	memset(&rsp, 0, sizeof(rsp));
363 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
364 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
365 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
366 	cmd.nvme_cmd.cdw12 = 2;
367 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
368 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
369 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
370 
371 	/* Get Log Page without data buffer */
372 	memset(&cmd, 0, sizeof(cmd));
373 	memset(&rsp, 0, sizeof(rsp));
374 	req.iovcnt = 0;
375 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
376 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
377 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
378 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
379 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
380 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
381 }
382 
383 static void
384 test_process_fabrics_cmd(void)
385 {
386 	struct	spdk_nvmf_request req = {};
387 	int	ret;
388 	struct	spdk_nvmf_qpair req_qpair = {};
389 	union	nvmf_h2c_msg  req_cmd = {};
390 	union	nvmf_c2h_msg   req_rsp = {};
391 
392 	req.qpair = &req_qpair;
393 	req.cmd  = &req_cmd;
394 	req.rsp  = &req_rsp;
395 	req.qpair->ctrlr = NULL;
396 
397 	/* No ctrlr and invalid command check */
398 	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
399 	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
400 	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
401 	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
402 }
403 
404 static bool
405 nvme_status_success(const struct spdk_nvme_status *status)
406 {
407 	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
408 }
409 
410 static void
411 test_connect(void)
412 {
413 	struct spdk_nvmf_fabric_connect_data connect_data;
414 	struct spdk_nvmf_poll_group group;
415 	struct spdk_nvmf_subsystem_poll_group *sgroups;
416 	struct spdk_nvmf_transport transport;
417 	struct spdk_nvmf_transport_ops tops = {};
418 	struct spdk_nvmf_subsystem subsystem;
419 	struct spdk_nvmf_request req;
420 	struct spdk_nvmf_qpair admin_qpair;
421 	struct spdk_nvmf_qpair qpair;
422 	struct spdk_nvmf_ctrlr ctrlr;
423 	struct spdk_nvmf_tgt tgt;
424 	union nvmf_h2c_msg cmd;
425 	union nvmf_c2h_msg rsp;
426 	const uint8_t hostid[16] = {
427 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
428 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
429 	};
430 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
431 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
432 	int rc;
433 
434 	memset(&group, 0, sizeof(group));
435 	group.thread = spdk_get_thread();
436 
437 	memset(&ctrlr, 0, sizeof(ctrlr));
438 	ctrlr.subsys = &subsystem;
439 	ctrlr.qpair_mask = spdk_bit_array_create(3);
440 	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
441 	ctrlr.vcprop.cc.bits.en = 1;
442 	ctrlr.vcprop.cc.bits.iosqes = 6;
443 	ctrlr.vcprop.cc.bits.iocqes = 4;
444 
445 	memset(&admin_qpair, 0, sizeof(admin_qpair));
446 	admin_qpair.group = &group;
447 	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
448 
449 	memset(&tgt, 0, sizeof(tgt));
450 	memset(&transport, 0, sizeof(transport));
451 	transport.ops = &tops;
452 	transport.opts.max_aq_depth = 32;
453 	transport.opts.max_queue_depth = 64;
454 	transport.opts.max_qpairs_per_ctrlr = 3;
455 	transport.tgt = &tgt;
456 
457 	memset(&qpair, 0, sizeof(qpair));
458 	qpair.transport = &transport;
459 	qpair.group = &group;
460 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
461 	TAILQ_INIT(&qpair.outstanding);
462 
463 	memset(&connect_data, 0, sizeof(connect_data));
464 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
465 	connect_data.cntlid = 0xFFFF;
466 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
467 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
468 
469 	memset(&subsystem, 0, sizeof(subsystem));
470 	subsystem.thread = spdk_get_thread();
471 	subsystem.id = 1;
472 	TAILQ_INIT(&subsystem.ctrlrs);
473 	subsystem.tgt = &tgt;
474 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
475 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
476 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
477 
478 	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
479 	group.sgroups = sgroups;
480 
481 	memset(&cmd, 0, sizeof(cmd));
482 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
483 	cmd.connect_cmd.cid = 1;
484 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
485 	cmd.connect_cmd.recfmt = 0;
486 	cmd.connect_cmd.qid = 0;
487 	cmd.connect_cmd.sqsize = 31;
488 	cmd.connect_cmd.cattr = 0;
489 	cmd.connect_cmd.kato = 120000;
490 
491 	memset(&req, 0, sizeof(req));
492 	req.qpair = &qpair;
493 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
494 	req.length = sizeof(connect_data);
495 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
496 	req.cmd = &cmd;
497 	req.rsp = &rsp;
498 
499 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
500 	MOCK_SET(spdk_nvmf_poll_group_create, &group);
501 
502 	/* Valid admin connect command */
503 	memset(&rsp, 0, sizeof(rsp));
504 	sgroups[subsystem.id].mgmt_io_outstanding++;
505 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
506 	rc = nvmf_ctrlr_cmd_connect(&req);
507 	poll_threads();
508 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
509 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
510 	CU_ASSERT(qpair.ctrlr != NULL);
511 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
512 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
513 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
514 	free(qpair.ctrlr);
515 	qpair.ctrlr = NULL;
516 
517 	/* Valid admin connect command with kato = 0 */
518 	cmd.connect_cmd.kato = 0;
519 	memset(&rsp, 0, sizeof(rsp));
520 	sgroups[subsystem.id].mgmt_io_outstanding++;
521 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
522 	rc = nvmf_ctrlr_cmd_connect(&req);
523 	poll_threads();
524 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
525 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
526 	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
527 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
528 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
529 	free(qpair.ctrlr);
530 	qpair.ctrlr = NULL;
531 	cmd.connect_cmd.kato = 120000;
532 
533 	/* Invalid data length */
534 	memset(&rsp, 0, sizeof(rsp));
535 	req.length = sizeof(connect_data) - 1;
536 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
537 	rc = nvmf_ctrlr_cmd_connect(&req);
538 	poll_threads();
539 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
540 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
541 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
542 	CU_ASSERT(qpair.ctrlr == NULL);
543 	req.length = sizeof(connect_data);
544 
545 	/* Invalid recfmt */
546 	memset(&rsp, 0, sizeof(rsp));
547 	cmd.connect_cmd.recfmt = 1234;
548 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
549 	rc = nvmf_ctrlr_cmd_connect(&req);
550 	poll_threads();
551 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
552 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
553 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
554 	CU_ASSERT(qpair.ctrlr == NULL);
555 	cmd.connect_cmd.recfmt = 0;
556 
557 	/* Subsystem not found */
558 	memset(&rsp, 0, sizeof(rsp));
559 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
560 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
561 	rc = nvmf_ctrlr_cmd_connect(&req);
562 	poll_threads();
563 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
564 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
565 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
566 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
567 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
568 	CU_ASSERT(qpair.ctrlr == NULL);
569 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
570 
571 	/* Unterminated hostnqn */
572 	memset(&rsp, 0, sizeof(rsp));
573 	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
574 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
575 	rc = nvmf_ctrlr_cmd_connect(&req);
576 	poll_threads();
577 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
578 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
579 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
580 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
581 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
582 	CU_ASSERT(qpair.ctrlr == NULL);
583 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
584 
585 	/* Host not allowed */
586 	memset(&rsp, 0, sizeof(rsp));
587 	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
588 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
589 	rc = nvmf_ctrlr_cmd_connect(&req);
590 	poll_threads();
591 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
592 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
593 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
594 	CU_ASSERT(qpair.ctrlr == NULL);
595 	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);
596 
597 	/* Invalid sqsize == 0 */
598 	memset(&rsp, 0, sizeof(rsp));
599 	cmd.connect_cmd.sqsize = 0;
600 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
601 	rc = nvmf_ctrlr_cmd_connect(&req);
602 	poll_threads();
603 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
604 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
605 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
606 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
607 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
608 	CU_ASSERT(qpair.ctrlr == NULL);
609 	cmd.connect_cmd.sqsize = 31;
610 
611 	/* Invalid admin sqsize > max_aq_depth */
612 	memset(&rsp, 0, sizeof(rsp));
613 	cmd.connect_cmd.sqsize = 32;
614 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
615 	rc = nvmf_ctrlr_cmd_connect(&req);
616 	poll_threads();
617 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
618 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
619 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
620 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
621 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
622 	CU_ASSERT(qpair.ctrlr == NULL);
623 	cmd.connect_cmd.sqsize = 31;
624 
625 	/* Invalid I/O sqsize > max_queue_depth */
626 	memset(&rsp, 0, sizeof(rsp));
627 	cmd.connect_cmd.qid = 1;
628 	cmd.connect_cmd.sqsize = 64;
629 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
630 	rc = nvmf_ctrlr_cmd_connect(&req);
631 	poll_threads();
632 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
633 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
634 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
635 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
636 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
637 	CU_ASSERT(qpair.ctrlr == NULL);
638 	cmd.connect_cmd.qid = 0;
639 	cmd.connect_cmd.sqsize = 31;
640 
641 	/* Invalid cntlid for admin queue */
642 	memset(&rsp, 0, sizeof(rsp));
643 	connect_data.cntlid = 0x1234;
644 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
645 	rc = nvmf_ctrlr_cmd_connect(&req);
646 	poll_threads();
647 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
648 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
649 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
650 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
651 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
652 	CU_ASSERT(qpair.ctrlr == NULL);
653 	connect_data.cntlid = 0xFFFF;
654 
655 	ctrlr.admin_qpair = &admin_qpair;
656 	ctrlr.subsys = &subsystem;
657 
658 	/* Valid I/O queue connect command */
659 	memset(&rsp, 0, sizeof(rsp));
660 	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
661 	cmd.connect_cmd.qid = 1;
662 	cmd.connect_cmd.sqsize = 63;
663 	sgroups[subsystem.id].mgmt_io_outstanding++;
664 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
665 	rc = nvmf_ctrlr_cmd_connect(&req);
666 	poll_threads();
667 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
668 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
669 	CU_ASSERT(qpair.ctrlr == &ctrlr);
670 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
671 	qpair.ctrlr = NULL;
672 	cmd.connect_cmd.sqsize = 31;
673 
674 	/* Non-existent controller */
675 	memset(&rsp, 0, sizeof(rsp));
676 	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
677 	sgroups[subsystem.id].mgmt_io_outstanding++;
678 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
679 	rc = nvmf_ctrlr_cmd_connect(&req);
680 	poll_threads();
681 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
682 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
683 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
684 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
685 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
686 	CU_ASSERT(qpair.ctrlr == NULL);
687 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
688 	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
689 
690 	/* I/O connect to discovery controller */
691 	memset(&rsp, 0, sizeof(rsp));
692 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
693 	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
694 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
695 	sgroups[subsystem.id].mgmt_io_outstanding++;
696 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
697 	rc = nvmf_ctrlr_cmd_connect(&req);
698 	poll_threads();
699 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
700 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
701 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
702 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
703 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
704 	CU_ASSERT(qpair.ctrlr == NULL);
705 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
706 
707 	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
708 	cmd.connect_cmd.qid = 0;
709 	cmd.connect_cmd.kato = 120000;
710 	memset(&rsp, 0, sizeof(rsp));
711 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
712 	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
713 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
714 	sgroups[subsystem.id].mgmt_io_outstanding++;
715 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
716 	rc = nvmf_ctrlr_cmd_connect(&req);
717 	poll_threads();
718 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
719 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
720 	CU_ASSERT(qpair.ctrlr != NULL);
721 	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
722 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
723 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
724 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
725 	free(qpair.ctrlr);
726 	qpair.ctrlr = NULL;
727 
728 	/* I/O connect to discovery controller with keep-alive-timeout == 0.
729 	 *  Then, a fixed timeout value is set to keep-alive-timeout.
730 	 */
731 	cmd.connect_cmd.kato = 0;
732 	memset(&rsp, 0, sizeof(rsp));
733 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
734 	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
735 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
736 	sgroups[subsystem.id].mgmt_io_outstanding++;
737 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
738 	rc = nvmf_ctrlr_cmd_connect(&req);
739 	poll_threads();
740 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
741 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
742 	CU_ASSERT(qpair.ctrlr != NULL);
743 	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
744 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
745 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
746 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
747 	free(qpair.ctrlr);
748 	qpair.ctrlr = NULL;
749 	cmd.connect_cmd.qid = 1;
750 	cmd.connect_cmd.kato = 120000;
751 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
752 	MOCK_SET(spdk_nvmf_subsystem_is_discovery, false);
753 
754 	/* I/O connect to disabled controller */
755 	memset(&rsp, 0, sizeof(rsp));
756 	ctrlr.vcprop.cc.bits.en = 0;
757 	sgroups[subsystem.id].mgmt_io_outstanding++;
758 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
759 	rc = nvmf_ctrlr_cmd_connect(&req);
760 	poll_threads();
761 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
762 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
763 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
764 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
765 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
766 	CU_ASSERT(qpair.ctrlr == NULL);
767 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
768 	ctrlr.vcprop.cc.bits.en = 1;
769 
770 	/* I/O connect with invalid IOSQES */
771 	memset(&rsp, 0, sizeof(rsp));
772 	ctrlr.vcprop.cc.bits.iosqes = 3;
773 	sgroups[subsystem.id].mgmt_io_outstanding++;
774 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
775 	rc = nvmf_ctrlr_cmd_connect(&req);
776 	poll_threads();
777 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
778 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
779 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
780 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
781 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
782 	CU_ASSERT(qpair.ctrlr == NULL);
783 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
784 	ctrlr.vcprop.cc.bits.iosqes = 6;
785 
786 	/* I/O connect with invalid IOCQES */
787 	memset(&rsp, 0, sizeof(rsp));
788 	ctrlr.vcprop.cc.bits.iocqes = 3;
789 	sgroups[subsystem.id].mgmt_io_outstanding++;
790 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
791 	rc = nvmf_ctrlr_cmd_connect(&req);
792 	poll_threads();
793 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
794 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
795 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
796 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
797 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
798 	CU_ASSERT(qpair.ctrlr == NULL);
799 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
800 	ctrlr.vcprop.cc.bits.iocqes = 4;
801 
802 	/* I/O connect with qid that is too large */
803 	memset(&rsp, 0, sizeof(rsp));
804 	cmd.connect_cmd.qid = 3;
805 	sgroups[subsystem.id].mgmt_io_outstanding++;
806 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
807 	rc = nvmf_ctrlr_cmd_connect(&req);
808 	poll_threads();
809 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
810 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
811 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
812 	CU_ASSERT(qpair.ctrlr == NULL);
813 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
814 
815 	/* I/O connect with duplicate queue ID */
816 	memset(&rsp, 0, sizeof(rsp));
817 	spdk_bit_array_set(ctrlr.qpair_mask, 1);
818 	cmd.connect_cmd.qid = 1;
819 	sgroups[subsystem.id].mgmt_io_outstanding++;
820 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
821 	rc = nvmf_ctrlr_cmd_connect(&req);
822 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
823 	poll_threads();
824 	/* First time, it will detect duplicate QID and schedule a retry.  So for
825 	 * now we should expect the response to still be all zeroes.
826 	 */
827 	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
828 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);
829 
830 	/* Now advance the clock, so that the retry poller executes. */
831 	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
832 	poll_threads();
833 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
834 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
835 	CU_ASSERT(qpair.ctrlr == NULL);
836 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
837 
838 	/* I/O connect with temporarily duplicate queue ID. This covers race
839 	 * where qpair_mask bit may not yet be cleared, even though initiator
840 	 * has closed the connection.  See issue #2955. */
841 	memset(&rsp, 0, sizeof(rsp));
842 	sgroups[subsystem.id].mgmt_io_outstanding++;
843 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
844 	rc = nvmf_ctrlr_cmd_connect(&req);
845 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
846 	poll_threads();
847 	/* First time, it will detect duplicate QID and schedule a retry.  So for
848 	 * now we should expect the response to still be all zeroes.
849 	 */
850 	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
851 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);
852 
853 	/* Now advance the clock, so that the retry poller executes. */
854 	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
855 	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
856 	poll_threads();
857 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
858 	CU_ASSERT(qpair.ctrlr == &ctrlr);
859 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
860 	qpair.ctrlr = NULL;
861 
862 	/* I/O connect when admin qpair is being destroyed */
863 	admin_qpair.group = NULL;
864 	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
865 	memset(&rsp, 0, sizeof(rsp));
866 	sgroups[subsystem.id].mgmt_io_outstanding++;
867 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
868 	rc = nvmf_ctrlr_cmd_connect(&req);
869 	poll_threads();
870 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
871 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
872 	CU_ASSERT(qpair.ctrlr == NULL);
873 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
874 	admin_qpair.group = &group;
875 	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
876 
877 	/* I/O connect when admin qpair was destroyed */
878 	ctrlr.admin_qpair = NULL;
879 	memset(&rsp, 0, sizeof(rsp));
880 	sgroups[subsystem.id].mgmt_io_outstanding++;
881 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
882 	rc = nvmf_ctrlr_cmd_connect(&req);
883 	poll_threads();
884 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
885 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
886 	CU_ASSERT(qpair.ctrlr == NULL);
887 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
888 	ctrlr.admin_qpair = &admin_qpair;
889 
890 	/* Clean up globals */
891 	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
892 	MOCK_CLEAR(spdk_nvmf_poll_group_create);
893 
894 	spdk_bit_array_free(&ctrlr.qpair_mask);
895 	free(sgroups);
896 }
897 
898 static void
899 test_get_ns_id_desc_list(void)
900 {
901 	struct spdk_nvmf_subsystem subsystem;
902 	struct spdk_nvmf_qpair qpair;
903 	struct spdk_nvmf_ctrlr ctrlr;
904 	struct spdk_nvmf_request req;
905 	struct spdk_nvmf_ns *ns_ptrs[1];
906 	struct spdk_nvmf_ns ns;
907 	union nvmf_h2c_msg cmd;
908 	union nvmf_c2h_msg rsp;
909 	struct spdk_bdev bdev;
910 	uint8_t buf[4096];
911 
912 	memset(&subsystem, 0, sizeof(subsystem));
913 	ns_ptrs[0] = &ns;
914 	subsystem.ns = ns_ptrs;
915 	subsystem.max_nsid = 1;
916 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
917 
918 	memset(&ns, 0, sizeof(ns));
919 	ns.opts.nsid = 1;
920 	ns.bdev = &bdev;
921 
922 	memset(&qpair, 0, sizeof(qpair));
923 	qpair.ctrlr = &ctrlr;
924 
925 	memset(&ctrlr, 0, sizeof(ctrlr));
926 	ctrlr.subsys = &subsystem;
927 	ctrlr.vcprop.cc.bits.en = 1;
928 	ctrlr.thread = spdk_get_thread();
929 
930 	memset(&req, 0, sizeof(req));
931 	req.qpair = &qpair;
932 	req.cmd = &cmd;
933 	req.rsp = &rsp;
934 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
935 	req.length = sizeof(buf);
936 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);
937 
938 	memset(&cmd, 0, sizeof(cmd));
939 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
940 	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
941 
942 	/* Invalid NSID */
943 	cmd.nvme_cmd.nsid = 0;
944 	memset(&rsp, 0, sizeof(rsp));
945 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
946 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
947 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
948 
949 	/* Valid NSID, but ns has no IDs defined */
950 	cmd.nvme_cmd.nsid = 1;
951 	memset(&rsp, 0, sizeof(rsp));
952 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
953 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
954 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
955 	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));
956 
957 	/* Valid NSID, only EUI64 defined */
958 	ns.opts.eui64[0] = 0x11;
959 	ns.opts.eui64[7] = 0xFF;
960 	memset(&rsp, 0, sizeof(rsp));
961 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
962 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
963 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
964 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
965 	CU_ASSERT(buf[1] == 8);
966 	CU_ASSERT(buf[4] == 0x11);
967 	CU_ASSERT(buf[11] == 0xFF);
968 	CU_ASSERT(buf[13] == 0);
969 
970 	/* Valid NSID, only NGUID defined */
971 	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
972 	ns.opts.nguid[0] = 0x22;
973 	ns.opts.nguid[15] = 0xEE;
974 	memset(&rsp, 0, sizeof(rsp));
975 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
976 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
977 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
978 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
979 	CU_ASSERT(buf[1] == 16);
980 	CU_ASSERT(buf[4] == 0x22);
981 	CU_ASSERT(buf[19] == 0xEE);
982 	CU_ASSERT(buf[21] == 0);
983 
984 	/* Valid NSID, both EUI64 and NGUID defined */
985 	ns.opts.eui64[0] = 0x11;
986 	ns.opts.eui64[7] = 0xFF;
987 	ns.opts.nguid[0] = 0x22;
988 	ns.opts.nguid[15] = 0xEE;
989 	memset(&rsp, 0, sizeof(rsp));
990 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
991 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
992 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
993 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
994 	CU_ASSERT(buf[1] == 8);
995 	CU_ASSERT(buf[4] == 0x11);
996 	CU_ASSERT(buf[11] == 0xFF);
997 	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
998 	CU_ASSERT(buf[13] == 16);
999 	CU_ASSERT(buf[16] == 0x22);
1000 	CU_ASSERT(buf[31] == 0xEE);
1001 	CU_ASSERT(buf[33] == 0);
1002 
1003 	/* Valid NSID, EUI64, NGUID, and UUID defined */
1004 	ns.opts.eui64[0] = 0x11;
1005 	ns.opts.eui64[7] = 0xFF;
1006 	ns.opts.nguid[0] = 0x22;
1007 	ns.opts.nguid[15] = 0xEE;
1008 	ns.opts.uuid.u.raw[0] = 0x33;
1009 	ns.opts.uuid.u.raw[15] = 0xDD;
1010 	memset(&rsp, 0, sizeof(rsp));
1011 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1012 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1013 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1014 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
1015 	CU_ASSERT(buf[1] == 8);
1016 	CU_ASSERT(buf[4] == 0x11);
1017 	CU_ASSERT(buf[11] == 0xFF);
1018 	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
1019 	CU_ASSERT(buf[13] == 16);
1020 	CU_ASSERT(buf[16] == 0x22);
1021 	CU_ASSERT(buf[31] == 0xEE);
1022 	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
1023 	CU_ASSERT(buf[33] == 16);
1024 	CU_ASSERT(buf[36] == 0x33);
1025 	CU_ASSERT(buf[51] == 0xDD);
1026 	CU_ASSERT(buf[53] == 0);
1027 }
1028 
1029 static void
1030 test_identify_ns(void)
1031 {
1032 	struct spdk_nvmf_subsystem subsystem = {};
1033 	struct spdk_nvmf_transport transport = {};
1034 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1035 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1036 	struct spdk_nvme_cmd cmd = {};
1037 	struct spdk_nvme_cpl rsp = {};
1038 	struct spdk_nvme_ns_data nsdata = {};
1039 	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
1040 	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
1041 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
1042 
1043 	subsystem.ns = ns_arr;
1044 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
1045 
1046 	/* Invalid NSID 0 */
1047 	cmd.nsid = 0;
1048 	memset(&nsdata, 0, sizeof(nsdata));
1049 	memset(&rsp, 0, sizeof(rsp));
1050 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1051 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1052 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1053 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1054 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1055 
1056 	/* Valid NSID 1 */
1057 	cmd.nsid = 1;
1058 	memset(&nsdata, 0, sizeof(nsdata));
1059 	memset(&rsp, 0, sizeof(rsp));
1060 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1061 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1062 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1063 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1064 	CU_ASSERT(nsdata.nsze == 1234);
1065 
1066 	/* Valid but inactive NSID 2 */
1067 	cmd.nsid = 2;
1068 	memset(&nsdata, 0, sizeof(nsdata));
1069 	memset(&rsp, 0, sizeof(rsp));
1070 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1071 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1072 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1073 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1074 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1075 
1076 	/* Valid NSID 3 */
1077 	cmd.nsid = 3;
1078 	memset(&nsdata, 0, sizeof(nsdata));
1079 	memset(&rsp, 0, sizeof(rsp));
1080 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1081 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1082 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1083 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1084 	CU_ASSERT(nsdata.nsze == 5678);
1085 
1086 	/* Invalid NSID 4 */
1087 	cmd.nsid = 4;
1088 	memset(&nsdata, 0, sizeof(nsdata));
1089 	memset(&rsp, 0, sizeof(rsp));
1090 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1091 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1092 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1093 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1094 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1095 
1096 	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
1097 	cmd.nsid = 0xFFFFFFFF;
1098 	memset(&nsdata, 0, sizeof(nsdata));
1099 	memset(&rsp, 0, sizeof(rsp));
1100 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1101 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1102 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1103 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1104 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1105 }
1106 
1107 static void
1108 test_identify_ns_iocs_specific(void)
1109 {
1110 	struct spdk_nvmf_subsystem subsystem = {};
1111 	struct spdk_nvmf_transport transport = {};
1112 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
1113 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1114 	struct spdk_nvme_cmd cmd = {};
1115 	struct spdk_nvme_cpl rsp = {};
1116 	struct spdk_nvme_zns_ns_data nsdata = {};
1117 	struct spdk_bdev bdev[2] = {{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE, .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES}, {.blockcnt = 5678}};
1118 	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
1119 	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};
1120 
1121 	subsystem.ns = ns_arr;
1122 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
1123 
1124 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;
1125 
1126 	/* Invalid ZNS NSID 0 */
1127 	cmd.nsid = 0;
1128 	memset(&nsdata, 0xFF, sizeof(nsdata));
1129 	memset(&rsp, 0, sizeof(rsp));
1130 	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1131 			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1132 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1133 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1134 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1135 
1136 	/* Valid ZNS NSID 1 */
1137 	cmd.nsid = 1;
1138 	memset(&nsdata, 0xFF, sizeof(nsdata));
1139 	memset(&rsp, 0, sizeof(rsp));
1140 	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1141 			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1142 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1143 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1144 	CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1);
1145 	CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1);
1146 	CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1);
1147 	CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE);
1148 	nsdata.ozcs.read_across_zone_boundaries = 0;
1149 	nsdata.mar = 0;
1150 	nsdata.mor = 0;
1151 	nsdata.lbafe[0].zsze = 0;
1152 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1153 
1154 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;
1155 
1156 	/* Valid NVM NSID 2 */
1157 	cmd.nsid = 2;
1158 	memset(&nsdata, 0xFF, sizeof(nsdata));
1159 	memset(&rsp, 0, sizeof(rsp));
1160 	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1161 			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1162 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1163 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1164 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1165 
1166 	/* Invalid NVM NSID 3 */
1167 	cmd.nsid = 0;
1168 	memset(&nsdata, 0xFF, sizeof(nsdata));
1169 	memset(&rsp, 0, sizeof(rsp));
1170 	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1171 			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1172 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1173 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1174 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1175 }
1176 
1177 static void
1178 test_set_get_features(void)
1179 {
1180 	struct spdk_nvmf_subsystem subsystem = {};
1181 	struct spdk_nvmf_qpair admin_qpair = {};
1182 	enum spdk_nvme_ana_state ana_state[3];
1183 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1184 	struct spdk_nvmf_ctrlr ctrlr = {
1185 		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
1186 	};
1187 	union nvmf_h2c_msg cmd = {};
1188 	union nvmf_c2h_msg rsp = {};
1189 	struct spdk_nvmf_ns ns[3];
1190 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
1191 	struct spdk_nvmf_request req;
1192 	int rc;
1193 
1194 	ns[0].anagrpid = 1;
1195 	ns[2].anagrpid = 3;
1196 	subsystem.ns = ns_arr;
1197 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
1198 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1199 	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1200 	admin_qpair.ctrlr = &ctrlr;
1201 	req.qpair = &admin_qpair;
1202 	cmd.nvme_cmd.nsid = 1;
1203 	req.cmd = &cmd;
1204 	req.rsp = &rsp;
1205 
1206 	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1207 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1208 	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
1209 	ns[0].ptpl_file = "testcfg";
1210 	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
1211 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1212 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1213 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
1214 	CU_ASSERT(ns[0].ptpl_activated == true);
1215 
1216 	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1217 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1218 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
1219 	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
1220 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1221 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1222 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1223 	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
1224 
1225 
1226 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1227 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1228 	cmd.nvme_cmd.cdw11 = 0x42;
1229 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1230 
1231 	rc = nvmf_ctrlr_get_features(&req);
1232 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1233 
1234 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1235 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1236 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1237 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1238 
1239 	rc = nvmf_ctrlr_get_features(&req);
1240 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1241 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1242 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1243 
1244 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1245 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1246 	cmd.nvme_cmd.cdw11 = 0x42;
1247 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1248 
1249 	rc = nvmf_ctrlr_set_features(&req);
1250 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1251 
1252 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1253 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1254 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1255 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1256 
1257 	rc = nvmf_ctrlr_set_features(&req);
1258 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1259 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1260 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1261 
1262 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
1263 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1264 	cmd.nvme_cmd.cdw11 = 0x42;
1265 	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
1266 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1267 
1268 	rc = nvmf_ctrlr_set_features(&req);
1269 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1270 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1271 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1272 
1273 
1274 	/* get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
1275 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1276 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1277 
1278 	rc = nvmf_ctrlr_get_features(&req);
1279 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1280 
1281 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
1282 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1283 	cmd.nvme_cmd.cdw11 = 0x42;
1284 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
1285 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1286 
1287 	rc = nvmf_ctrlr_set_features(&req);
1288 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1289 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1290 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1291 
1292 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
1293 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1294 	cmd.nvme_cmd.cdw11 = 0x42;
1295 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
1296 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1297 
1298 	rc = nvmf_ctrlr_set_features(&req);
1299 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1300 }
1301 
1302 /*
1303  * Reservation Unit Test Configuration
1304  *       --------             --------    --------
1305  *      | Host A |           | Host B |  | Host C |
1306  *       --------             --------    --------
1307  *      /        \               |           |
1308  *  --------   --------       -------     -------
1309  * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
1310  *  --------   --------       -------     -------
1311  *    \           \              /           /
1312  *     \           \            /           /
1313  *      \           \          /           /
1314  *      --------------------------------------
1315  *     |            NAMESPACE 1               |
1316  *      --------------------------------------
1317  */
1318 
1319 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
1320 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
1321 
1322 static void
1323 ut_reservation_init(enum spdk_nvme_reservation_type rtype)
1324 {
1325 	/* Host A has two controllers */
1326 	spdk_uuid_generate(&g_ctrlr1_A.hostid);
1327 	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
1328 
1329 	/* Host B has 1 controller */
1330 	spdk_uuid_generate(&g_ctrlr_B.hostid);
1331 
1332 	/* Host C has 1 controller */
1333 	spdk_uuid_generate(&g_ctrlr_C.hostid);
1334 
1335 	memset(&g_ns_info, 0, sizeof(g_ns_info));
1336 	g_ns_info.rtype = rtype;
1337 	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
1338 	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
1339 	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
1340 }
1341 
1342 static void
1343 test_reservation_write_exclusive(void)
1344 {
1345 	struct spdk_nvmf_request req = {};
1346 	union nvmf_h2c_msg cmd = {};
1347 	union nvmf_c2h_msg rsp = {};
1348 	int rc;
1349 
1350 	req.cmd = &cmd;
1351 	req.rsp = &rsp;
1352 
1353 	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
1354 	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1355 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1356 
1357 	/* Test Case: Issue a Read command from Host A and Host B */
1358 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1359 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1360 	SPDK_CU_ASSERT_FATAL(rc == 0);
1361 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1362 	SPDK_CU_ASSERT_FATAL(rc == 0);
1363 
1364 	/* Test Case: Issue a DSM Write command from Host A and Host B */
1365 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1366 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1367 	SPDK_CU_ASSERT_FATAL(rc == 0);
1368 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1369 	SPDK_CU_ASSERT_FATAL(rc < 0);
1370 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1371 
1372 	/* Test Case: Issue a Write command from Host C */
1373 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1374 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1375 	SPDK_CU_ASSERT_FATAL(rc < 0);
1376 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1377 
1378 	/* Test Case: Issue a Read command from Host B */
1379 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1380 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1381 	SPDK_CU_ASSERT_FATAL(rc == 0);
1382 
1383 	/* Unregister Host C */
1384 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1385 
1386 	/* Test Case: Read and Write commands from non-registrant Host C */
1387 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1388 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1389 	SPDK_CU_ASSERT_FATAL(rc < 0);
1390 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1391 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1392 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1393 	SPDK_CU_ASSERT_FATAL(rc == 0);
1394 }
1395 
1396 static void
1397 test_reservation_exclusive_access(void)
1398 {
1399 	struct spdk_nvmf_request req = {};
1400 	union nvmf_h2c_msg cmd = {};
1401 	union nvmf_c2h_msg rsp = {};
1402 	int rc;
1403 
1404 	req.cmd = &cmd;
1405 	req.rsp = &rsp;
1406 
1407 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1408 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1409 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1410 
1411 	/* Test Case: Issue a Read command from Host B */
1412 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1413 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1414 	SPDK_CU_ASSERT_FATAL(rc < 0);
1415 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1416 
1417 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1418 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1419 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1420 	SPDK_CU_ASSERT_FATAL(rc == 0);
1421 }
1422 
1423 static void
1424 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1425 {
1426 	struct spdk_nvmf_request req = {};
1427 	union nvmf_h2c_msg cmd = {};
1428 	union nvmf_c2h_msg rsp = {};
1429 	int rc;
1430 
1431 	req.cmd = &cmd;
1432 	req.rsp = &rsp;
1433 
1434 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1435 	ut_reservation_init(rtype);
1436 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1437 
1438 	/* Test Case: Issue a Read command from Host A and Host C */
1439 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1440 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1441 	SPDK_CU_ASSERT_FATAL(rc == 0);
1442 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1443 	SPDK_CU_ASSERT_FATAL(rc == 0);
1444 
1445 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1446 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1447 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1448 	SPDK_CU_ASSERT_FATAL(rc == 0);
1449 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1450 	SPDK_CU_ASSERT_FATAL(rc == 0);
1451 
1452 	/* Unregister Host C */
1453 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1454 
1455 	/* Test Case: Read and Write commands from non-registrant Host C */
1456 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1457 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1458 	SPDK_CU_ASSERT_FATAL(rc == 0);
1459 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1460 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1461 	SPDK_CU_ASSERT_FATAL(rc < 0);
1462 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1463 }
1464 
1465 static void
1466 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1467 {
1468 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1469 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1470 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1471 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1472 }
1473 
1474 static void
1475 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1476 {
1477 	struct spdk_nvmf_request req = {};
1478 	union nvmf_h2c_msg cmd = {};
1479 	union nvmf_c2h_msg rsp = {};
1480 	int rc;
1481 
1482 	req.cmd = &cmd;
1483 	req.rsp = &rsp;
1484 
1485 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1486 	ut_reservation_init(rtype);
1487 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1488 
1489 	/* Test Case: Issue a Write command from Host B */
1490 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1491 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1492 	SPDK_CU_ASSERT_FATAL(rc == 0);
1493 
1494 	/* Unregister Host B */
1495 	spdk_uuid_set_null(&g_ns_info.reg_hostid[1]);
1496 
1497 	/* Test Case: Issue a Read command from Host B */
1498 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1499 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1500 	SPDK_CU_ASSERT_FATAL(rc < 0);
1501 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1502 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1503 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1504 	SPDK_CU_ASSERT_FATAL(rc < 0);
1505 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1506 }
1507 
1508 static void
1509 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1510 {
1511 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1512 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1513 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1514 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1515 }
1516 
1517 static void
1518 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1519 {
1520 	STAILQ_INIT(&ctrlr->async_events);
1521 }
1522 
1523 static void
1524 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1525 {
1526 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1527 
1528 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1529 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1530 		free(event);
1531 	}
1532 }
1533 
1534 static int
1535 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1536 {
1537 	int num = 0;
1538 	struct spdk_nvmf_async_event_completion *event;
1539 
1540 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1541 		num++;
1542 	}
1543 	return num;
1544 }
1545 
1546 static void
1547 test_reservation_notification_log_page(void)
1548 {
1549 	struct spdk_nvmf_ctrlr ctrlr;
1550 	struct spdk_nvmf_qpair qpair;
1551 	struct spdk_nvmf_ns ns;
1552 	struct spdk_nvmf_request req = {};
1553 	union nvmf_h2c_msg cmd = {};
1554 	union nvmf_c2h_msg rsp = {};
1555 	union spdk_nvme_async_event_completion event = {};
1556 	struct spdk_nvme_reservation_notification_log logs[3];
1557 	struct iovec iov;
1558 
1559 	memset(&ctrlr, 0, sizeof(ctrlr));
1560 	ctrlr.thread = spdk_get_thread();
1561 	TAILQ_INIT(&ctrlr.log_head);
1562 	init_pending_async_events(&ctrlr);
1563 	ns.nsid = 1;
1564 
1565 	/* Test Case: Mask all the reservation notifications */
1566 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1567 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1568 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1569 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1570 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1571 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1572 					  SPDK_NVME_RESERVATION_RELEASED);
1573 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1574 					  SPDK_NVME_RESERVATION_PREEMPTED);
1575 	poll_threads();
1576 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1577 
1578 	/* Test Case: Unmask all the reservation notifications,
1579 	 * 3 log pages are generated, and AER was triggered.
1580 	 */
1581 	ns.mask = 0;
1582 	ctrlr.num_avail_log_pages = 0;
1583 	req.cmd = &cmd;
1584 	req.rsp = &rsp;
1585 	ctrlr.aer_req[0] = &req;
1586 	ctrlr.nr_aer_reqs = 1;
1587 	req.qpair = &qpair;
1588 	TAILQ_INIT(&qpair.outstanding);
1589 	qpair.ctrlr = NULL;
1590 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1591 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1592 
1593 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1594 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1595 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1596 					  SPDK_NVME_RESERVATION_RELEASED);
1597 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1598 					  SPDK_NVME_RESERVATION_PREEMPTED);
1599 	poll_threads();
1600 	event.raw = rsp.nvme_cpl.cdw0;
1601 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1602 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1603 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1604 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1605 
1606 	/* Test Case: Get Log Page to clear the log pages */
1607 	iov.iov_base = &logs[0];
1608 	iov.iov_len = sizeof(logs);
1609 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1610 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1611 
1612 	cleanup_pending_async_events(&ctrlr);
1613 }
1614 
1615 static void
1616 test_get_dif_ctx(void)
1617 {
1618 	struct spdk_nvmf_subsystem subsystem = {};
1619 	struct spdk_nvmf_request req = {};
1620 	struct spdk_nvmf_qpair qpair = {};
1621 	struct spdk_nvmf_ctrlr ctrlr = {};
1622 	struct spdk_nvmf_ns ns = {};
1623 	struct spdk_nvmf_ns *_ns = NULL;
1624 	struct spdk_bdev bdev = {};
1625 	union nvmf_h2c_msg cmd = {};
1626 	struct spdk_dif_ctx dif_ctx = {};
1627 	bool ret;
1628 
1629 	ctrlr.subsys = &subsystem;
1630 
1631 	qpair.ctrlr = &ctrlr;
1632 
1633 	req.qpair = &qpair;
1634 	req.cmd = &cmd;
1635 
1636 	ns.bdev = &bdev;
1637 
1638 	ctrlr.dif_insert_or_strip = false;
1639 
1640 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1641 	CU_ASSERT(ret == false);
1642 
1643 	ctrlr.dif_insert_or_strip = true;
1644 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1645 
1646 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1647 	CU_ASSERT(ret == false);
1648 
1649 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1650 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1651 
1652 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1653 	CU_ASSERT(ret == false);
1654 
1655 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1656 
1657 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1658 	CU_ASSERT(ret == false);
1659 
1660 	qpair.qid = 1;
1661 
1662 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1663 	CU_ASSERT(ret == false);
1664 
1665 	cmd.nvme_cmd.nsid = 1;
1666 
1667 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1668 	CU_ASSERT(ret == false);
1669 
1670 	subsystem.max_nsid = 1;
1671 	subsystem.ns = &_ns;
1672 	subsystem.ns[0] = &ns;
1673 
1674 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1675 	CU_ASSERT(ret == false);
1676 
1677 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1678 
1679 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1680 	CU_ASSERT(ret == true);
1681 }
1682 
1683 static void
1684 test_identify_ctrlr(void)
1685 {
1686 	struct spdk_nvmf_tgt tgt = {};
1687 	struct spdk_nvmf_subsystem subsystem = {
1688 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1689 		.tgt = &tgt,
1690 	};
1691 	struct spdk_nvmf_transport_ops tops = {};
1692 	struct spdk_nvmf_transport transport = {
1693 		.ops = &tops,
1694 		.opts = {
1695 			.in_capsule_data_size = 4096,
1696 		},
1697 	};
1698 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1699 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1700 	struct spdk_nvme_ctrlr_data cdata = {};
1701 	uint32_t expected_ioccsz;
1702 
1703 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1704 
1705 	/* Check ioccsz, TCP transport */
1706 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1707 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1708 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1709 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1710 
1711 	/* Check ioccsz, RDMA transport */
1712 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1713 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1714 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1715 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1716 
1717 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1718 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1719 	ctrlr.dif_insert_or_strip = true;
1720 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1721 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1722 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1723 }
1724 
1725 static void
1726 test_identify_ctrlr_iocs_specific(void)
1727 {
1728 	struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 };
1729 	struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 };
1730 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop };
1731 	struct spdk_nvme_cmd cmd = {};
1732 	struct spdk_nvme_cpl rsp = {};
1733 	struct spdk_nvme_zns_ctrlr_data ctrlr_data = {};
1734 	struct spdk_nvme_nvm_ctrlr_data cdata_nvm = {};
1735 
1736 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;
1737 
1738 	/* ZNS max_zone_append_size_kib no limit */
1739 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1740 	memset(&rsp, 0, sizeof(rsp));
1741 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1742 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1743 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1744 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1745 	CU_ASSERT(ctrlr_data.zasl == 0);
1746 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1747 
1748 	/* ZNS max_zone_append_size_kib = 4096 */
1749 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1750 	memset(&rsp, 0, sizeof(rsp));
1751 	subsystem.max_zone_append_size_kib = 4096;
1752 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1753 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1754 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1755 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1756 	CU_ASSERT(ctrlr_data.zasl == 0);
1757 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1758 
1759 	/* ZNS max_zone_append_size_kib = 60000 */
1760 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1761 	memset(&rsp, 0, sizeof(rsp));
1762 	subsystem.max_zone_append_size_kib = 60000;
1763 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1764 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1765 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1766 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1767 	CU_ASSERT(ctrlr_data.zasl == 3);
1768 	ctrlr_data.zasl = 0;
1769 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1770 
1771 	/* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */
1772 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1773 	memset(&rsp, 0, sizeof(rsp));
1774 	ctrlr.vcprop.cap.bits.mpsmin = 2;
1775 	subsystem.max_zone_append_size_kib = 60000;
1776 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1777 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1778 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1779 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1780 	CU_ASSERT(ctrlr_data.zasl == 1);
1781 	ctrlr_data.zasl = 0;
1782 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1783 	ctrlr.vcprop.cap.bits.mpsmin = 0;
1784 
1785 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;
1786 
1787 	/* NVM max_discard_size_kib = 1024;
1788 	 * max_write_zeroes_size_kib = 1024;
1789 	 * mpsmin = 0;
1790 	 */
1791 	memset(&cdata_nvm, 0xFF, sizeof(cdata_nvm));
1792 	memset(&rsp, 0, sizeof(rsp));
1793 	subsystem.max_discard_size_kib = (uint64_t)1024;
1794 	subsystem.max_write_zeroes_size_kib = (uint64_t)1024;
1795 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1796 			&cdata_nvm, sizeof(cdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1797 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1798 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1799 	CU_ASSERT(cdata_nvm.wzsl == 8);
1800 	CU_ASSERT(cdata_nvm.dmrsl == 2048);
1801 	CU_ASSERT(cdata_nvm.dmrl == 1);
1802 }
1803 
1804 static int
1805 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1806 {
1807 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1808 
1809 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1810 };
1811 
1812 static void
1813 test_custom_admin_cmd(void)
1814 {
1815 	struct spdk_nvmf_subsystem subsystem;
1816 	struct spdk_nvmf_qpair qpair;
1817 	struct spdk_nvmf_ctrlr ctrlr;
1818 	struct spdk_nvmf_request req;
1819 	struct spdk_nvmf_ns *ns_ptrs[1];
1820 	struct spdk_nvmf_ns ns;
1821 	union nvmf_h2c_msg cmd;
1822 	union nvmf_c2h_msg rsp;
1823 	struct spdk_bdev bdev;
1824 	uint8_t buf[4096];
1825 	int rc;
1826 
1827 	memset(&subsystem, 0, sizeof(subsystem));
1828 	ns_ptrs[0] = &ns;
1829 	subsystem.ns = ns_ptrs;
1830 	subsystem.max_nsid = 1;
1831 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1832 
1833 	memset(&ns, 0, sizeof(ns));
1834 	ns.opts.nsid = 1;
1835 	ns.bdev = &bdev;
1836 
1837 	memset(&qpair, 0, sizeof(qpair));
1838 	qpair.ctrlr = &ctrlr;
1839 
1840 	memset(&ctrlr, 0, sizeof(ctrlr));
1841 	ctrlr.subsys = &subsystem;
1842 	ctrlr.vcprop.cc.bits.en = 1;
1843 	ctrlr.thread = spdk_get_thread();
1844 
1845 	memset(&req, 0, sizeof(req));
1846 	req.qpair = &qpair;
1847 	req.cmd = &cmd;
1848 	req.rsp = &rsp;
1849 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1850 	req.length = sizeof(buf);
1851 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);
1852 
1853 	memset(&cmd, 0, sizeof(cmd));
1854 	cmd.nvme_cmd.opc = 0xc1;
1855 	cmd.nvme_cmd.nsid = 0;
1856 	memset(&rsp, 0, sizeof(rsp));
1857 
1858 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1859 
1860 	/* Ensure that our hdlr is being called */
1861 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1862 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1863 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1864 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1865 }
1866 
1867 static void
1868 test_fused_compare_and_write(void)
1869 {
1870 	struct spdk_nvmf_request req = {};
1871 	struct spdk_nvmf_qpair qpair = {};
1872 	struct spdk_nvme_cmd cmd = {};
1873 	union nvmf_c2h_msg rsp = {};
1874 	struct spdk_nvmf_ctrlr ctrlr = {};
1875 	struct spdk_nvmf_subsystem subsystem = {};
1876 	struct spdk_nvmf_ns ns = {};
1877 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1878 	enum spdk_nvme_ana_state ana_state[1];
1879 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1880 	struct spdk_bdev bdev = {};
1881 
1882 	struct spdk_nvmf_poll_group group = {};
1883 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1884 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1885 	struct spdk_io_channel io_ch = {};
1886 
1887 	ns.bdev = &bdev;
1888 	ns.anagrpid = 1;
1889 
1890 	subsystem.id = 0;
1891 	subsystem.max_nsid = 1;
1892 	subsys_ns[0] = &ns;
1893 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1894 
1895 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1896 
1897 	/* Enable controller */
1898 	ctrlr.vcprop.cc.bits.en = 1;
1899 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1900 	ctrlr.listener = &listener;
1901 
1902 	group.num_sgroups = 1;
1903 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1904 	sgroups.num_ns = 1;
1905 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1906 	ns_info.channel = &io_ch;
1907 	sgroups.ns_info = &ns_info;
1908 	TAILQ_INIT(&sgroups.queued);
1909 	group.sgroups = &sgroups;
1910 	TAILQ_INIT(&qpair.outstanding);
1911 
1912 	qpair.ctrlr = &ctrlr;
1913 	qpair.group = &group;
1914 	qpair.qid = 1;
1915 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1916 
1917 	cmd.nsid = 1;
1918 
1919 	req.qpair = &qpair;
1920 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1921 	req.rsp = &rsp;
1922 
1923 	/* SUCCESS/SUCCESS */
1924 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1925 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1926 
1927 	spdk_nvmf_request_exec(&req);
1928 	CU_ASSERT(qpair.first_fused_req != NULL);
1929 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1930 
1931 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1932 	cmd.opc = SPDK_NVME_OPC_WRITE;
1933 
1934 	spdk_nvmf_request_exec(&req);
1935 	CU_ASSERT(qpair.first_fused_req == NULL);
1936 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1937 
1938 	/* Wrong sequence */
1939 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1940 	cmd.opc = SPDK_NVME_OPC_WRITE;
1941 
1942 	spdk_nvmf_request_exec(&req);
1943 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1944 	CU_ASSERT(qpair.first_fused_req == NULL);
1945 
1946 	/* Write as FUSE_FIRST (Wrong op code) */
1947 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1948 	cmd.opc = SPDK_NVME_OPC_WRITE;
1949 
1950 	spdk_nvmf_request_exec(&req);
1951 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1952 	CU_ASSERT(qpair.first_fused_req == NULL);
1953 
1954 	/* Compare as FUSE_SECOND (Wrong op code) */
1955 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1956 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1957 
1958 	spdk_nvmf_request_exec(&req);
1959 	CU_ASSERT(qpair.first_fused_req != NULL);
1960 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1961 
1962 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1963 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1964 
1965 	spdk_nvmf_request_exec(&req);
1966 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1967 	CU_ASSERT(qpair.first_fused_req == NULL);
1968 }
1969 
1970 static void
1971 test_multi_async_event_reqs(void)
1972 {
1973 	struct spdk_nvmf_subsystem subsystem = {};
1974 	struct spdk_nvmf_qpair qpair = {};
1975 	struct spdk_nvmf_ctrlr ctrlr = {};
1976 	struct spdk_nvmf_request req[5] = {};
1977 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1978 	struct spdk_nvmf_ns ns = {};
1979 	union nvmf_h2c_msg cmd[5] = {};
1980 	union nvmf_c2h_msg rsp[5] = {};
1981 
1982 	struct spdk_nvmf_poll_group group = {};
1983 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1984 
1985 	int i;
1986 
1987 	ns_ptrs[0] = &ns;
1988 	subsystem.ns = ns_ptrs;
1989 	subsystem.max_nsid = 1;
1990 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1991 
1992 	ns.opts.nsid = 1;
1993 	group.sgroups = &sgroups;
1994 
1995 	qpair.ctrlr = &ctrlr;
1996 	qpair.group = &group;
1997 	TAILQ_INIT(&qpair.outstanding);
1998 
1999 	ctrlr.subsys = &subsystem;
2000 	ctrlr.vcprop.cc.bits.en = 1;
2001 	ctrlr.thread = spdk_get_thread();
2002 
2003 	for (i = 0; i < 5; i++) {
2004 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2005 		cmd[i].nvme_cmd.nsid = 1;
2006 		cmd[i].nvme_cmd.cid = i;
2007 
2008 		req[i].qpair = &qpair;
2009 		req[i].cmd = &cmd[i];
2010 		req[i].rsp = &rsp[i];
2011 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2012 	}
2013 
2014 	/* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */
2015 	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
2016 	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
2017 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2018 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
2019 	}
2020 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2021 
2022 	/* Exceeding the SPDK_NVMF_MAX_ASYNC_EVENTS reports error */
2023 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2024 	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
2025 	CU_ASSERT(rsp[4].nvme_cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC);
2026 	CU_ASSERT(rsp[4].nvme_cpl.status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
2027 
2028 	/* Test if the aer_reqs keep continuous when abort a req in the middle */
2029 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
2030 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2031 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2032 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
2033 
2034 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
2035 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2036 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2037 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
2038 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
2039 
2040 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
2041 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
2042 }
2043 
2044 static void
2045 test_get_ana_log_page_one_ns_per_anagrp(void)
2046 {
2047 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
2048 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
2049 	uint32_t ana_group[3];
2050 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
2051 	struct spdk_nvmf_ctrlr ctrlr = {};
2052 	enum spdk_nvme_ana_state ana_state[3];
2053 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2054 	struct spdk_nvmf_ns ns[3];
2055 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
2056 	uint64_t offset;
2057 	uint32_t length;
2058 	int i;
2059 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2060 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2061 	struct iovec iov, iovs[2];
2062 	struct spdk_nvme_ana_page *ana_hdr;
2063 	char _ana_desc[UT_ANA_DESC_SIZE];
2064 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2065 
2066 	subsystem.ns = ns_arr;
2067 	subsystem.max_nsid = 3;
2068 	for (i = 0; i < 3; i++) {
2069 		subsystem.ana_group[i] = 1;
2070 	}
2071 	ctrlr.subsys = &subsystem;
2072 	ctrlr.listener = &listener;
2073 
2074 	for (i = 0; i < 3; i++) {
2075 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2076 	}
2077 
2078 	for (i = 0; i < 3; i++) {
2079 		ns_arr[i]->nsid = i + 1;
2080 		ns_arr[i]->anagrpid = i + 1;
2081 	}
2082 
2083 	/* create expected page */
2084 	ana_hdr = (void *)&expected_page[0];
2085 	ana_hdr->num_ana_group_desc = 3;
2086 	ana_hdr->change_count = 0;
2087 
2088 	/* descriptor may be unaligned. So create data and then copy it to the location. */
2089 	ana_desc = (void *)_ana_desc;
2090 	offset = sizeof(struct spdk_nvme_ana_page);
2091 
2092 	for (i = 0; i < 3; i++) {
2093 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
2094 		ana_desc->ana_group_id = ns_arr[i]->nsid;
2095 		ana_desc->num_of_nsid = 1;
2096 		ana_desc->change_count = 0;
2097 		ana_desc->ana_state = ctrlr.listener->ana_state[i];
2098 		ana_desc->nsid[0] = ns_arr[i]->nsid;
2099 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
2100 		offset += UT_ANA_DESC_SIZE;
2101 	}
2102 
2103 	/* read entire actual log page */
2104 	offset = 0;
2105 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2106 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2107 		iov.iov_base = &actual_page[offset];
2108 		iov.iov_len = length;
2109 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2110 		offset += length;
2111 	}
2112 
2113 	/* compare expected page and actual page */
2114 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2115 
2116 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2117 	offset = 0;
2118 	iovs[0].iov_base = &actual_page[offset];
2119 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2120 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2121 	iovs[1].iov_base = &actual_page[offset];
2122 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
2123 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2124 
2125 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2126 
2127 #undef UT_ANA_DESC_SIZE
2128 #undef UT_ANA_LOG_PAGE_SIZE
2129 }
2130 
2131 static void
2132 test_get_ana_log_page_multi_ns_per_anagrp(void)
2133 {
2134 #define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
2135 				 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
2136 				 sizeof(uint32_t) * 5)
2137 	struct spdk_nvmf_ns ns[5];
2138 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
2139 	uint32_t ana_group[5] = {0};
2140 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
2141 	enum spdk_nvme_ana_state ana_state[5];
2142 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
2143 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
2144 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2145 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2146 	struct iovec iov, iovs[2];
2147 	struct spdk_nvme_ana_page *ana_hdr;
2148 	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
2149 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2150 	uint64_t offset;
2151 	uint32_t length;
2152 	int i;
2153 
2154 	subsystem.max_nsid = 5;
2155 	subsystem.ana_group[1] = 3;
2156 	subsystem.ana_group[2] = 2;
2157 	for (i = 0; i < 5; i++) {
2158 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2159 	}
2160 
2161 	for (i = 0; i < 5; i++) {
2162 		ns_arr[i]->nsid = i + 1;
2163 	}
2164 	ns_arr[0]->anagrpid = 2;
2165 	ns_arr[1]->anagrpid = 3;
2166 	ns_arr[2]->anagrpid = 2;
2167 	ns_arr[3]->anagrpid = 3;
2168 	ns_arr[4]->anagrpid = 2;
2169 
2170 	/* create expected page */
2171 	ana_hdr = (void *)&expected_page[0];
2172 	ana_hdr->num_ana_group_desc = 2;
2173 	ana_hdr->change_count = 0;
2174 
2175 	/* descriptor may be unaligned. So create data and then copy it to the location. */
2176 	ana_desc = (void *)_ana_desc;
2177 	offset = sizeof(struct spdk_nvme_ana_page);
2178 
2179 	memset(_ana_desc, 0, sizeof(_ana_desc));
2180 	ana_desc->ana_group_id = 2;
2181 	ana_desc->num_of_nsid = 3;
2182 	ana_desc->change_count = 0;
2183 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2184 	ana_desc->nsid[0] = 1;
2185 	ana_desc->nsid[1] = 3;
2186 	ana_desc->nsid[2] = 5;
2187 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2188 	       sizeof(uint32_t) * 3);
2189 	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;
2190 
2191 	memset(_ana_desc, 0, sizeof(_ana_desc));
2192 	ana_desc->ana_group_id = 3;
2193 	ana_desc->num_of_nsid = 2;
2194 	ana_desc->change_count = 0;
2195 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2196 	ana_desc->nsid[0] = 2;
2197 	ana_desc->nsid[1] = 4;
2198 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2199 	       sizeof(uint32_t) * 2);
2200 
2201 	/* read entire actual log page, and compare expected page and actual page. */
2202 	offset = 0;
2203 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2204 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2205 		iov.iov_base = &actual_page[offset];
2206 		iov.iov_len = length;
2207 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2208 		offset += length;
2209 	}
2210 
2211 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2212 
2213 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2214 	offset = 0;
2215 	iovs[0].iov_base = &actual_page[offset];
2216 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2217 	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2218 	iovs[1].iov_base = &actual_page[offset];
2219 	iovs[1].iov_len = sizeof(uint32_t) * 5;
2220 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2221 
2222 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2223 
2224 #undef UT_ANA_LOG_PAGE_SIZE
2225 }
2226 static void
2227 test_multi_async_events(void)
2228 {
2229 	struct spdk_nvmf_subsystem subsystem = {};
2230 	struct spdk_nvmf_qpair qpair = {};
2231 	struct spdk_nvmf_ctrlr ctrlr = {};
2232 	struct spdk_nvmf_request req[4] = {};
2233 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2234 	struct spdk_nvmf_ns ns = {};
2235 	union nvmf_h2c_msg cmd[4] = {};
2236 	union nvmf_c2h_msg rsp[4] = {};
2237 	union spdk_nvme_async_event_completion event = {};
2238 	struct spdk_nvmf_poll_group group = {};
2239 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2240 	int i;
2241 
2242 	ns_ptrs[0] = &ns;
2243 	subsystem.ns = ns_ptrs;
2244 	subsystem.max_nsid = 1;
2245 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2246 
2247 	ns.opts.nsid = 1;
2248 	group.sgroups = &sgroups;
2249 
2250 	qpair.ctrlr = &ctrlr;
2251 	qpair.group = &group;
2252 	TAILQ_INIT(&qpair.outstanding);
2253 
2254 	ctrlr.subsys = &subsystem;
2255 	ctrlr.vcprop.cc.bits.en = 1;
2256 	ctrlr.thread = spdk_get_thread();
2257 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2258 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2259 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2260 	init_pending_async_events(&ctrlr);
2261 
2262 	/* Target queue pending events when there is no outstanding AER request */
2263 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2264 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2265 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2266 
2267 	for (i = 0; i < 4; i++) {
2268 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2269 		cmd[i].nvme_cmd.nsid = 1;
2270 		cmd[i].nvme_cmd.cid = i;
2271 
2272 		req[i].qpair = &qpair;
2273 		req[i].cmd = &cmd[i];
2274 		req[i].rsp = &rsp[i];
2275 
2276 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2277 
2278 		sgroups.mgmt_io_outstanding = 1;
2279 		if (i < 3) {
2280 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2281 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2282 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2283 		} else {
2284 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2285 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2286 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2287 		}
2288 	}
2289 
2290 	event.raw = rsp[0].nvme_cpl.cdw0;
2291 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2292 	event.raw = rsp[1].nvme_cpl.cdw0;
2293 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2294 	event.raw = rsp[2].nvme_cpl.cdw0;
2295 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2296 
2297 	cleanup_pending_async_events(&ctrlr);
2298 }
2299 
2300 static void
2301 test_rae(void)
2302 {
2303 	struct spdk_nvmf_subsystem subsystem = {};
2304 	struct spdk_nvmf_qpair qpair = {};
2305 	struct spdk_nvmf_ctrlr ctrlr = {};
2306 	struct spdk_nvmf_request req[3] = {};
2307 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2308 	struct spdk_nvmf_ns ns = {};
2309 	union nvmf_h2c_msg cmd[3] = {};
2310 	union nvmf_c2h_msg rsp[3] = {};
2311 	union spdk_nvme_async_event_completion event = {};
2312 	struct spdk_nvmf_poll_group group = {};
2313 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2314 	int i;
2315 	char data[4096];
2316 
2317 	ns_ptrs[0] = &ns;
2318 	subsystem.ns = ns_ptrs;
2319 	subsystem.max_nsid = 1;
2320 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2321 
2322 	ns.opts.nsid = 1;
2323 	group.sgroups = &sgroups;
2324 
2325 	qpair.ctrlr = &ctrlr;
2326 	qpair.group = &group;
2327 	TAILQ_INIT(&qpair.outstanding);
2328 
2329 	ctrlr.subsys = &subsystem;
2330 	ctrlr.vcprop.cc.bits.en = 1;
2331 	ctrlr.thread = spdk_get_thread();
2332 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2333 	init_pending_async_events(&ctrlr);
2334 
2335 	/* Target queue pending events when there is no outstanding AER request */
2336 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2337 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2338 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2339 	/* only one event will be queued before RAE is clear */
2340 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2341 
2342 	req[0].qpair = &qpair;
2343 	req[0].cmd = &cmd[0];
2344 	req[0].rsp = &rsp[0];
2345 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2346 	cmd[0].nvme_cmd.nsid = 1;
2347 	cmd[0].nvme_cmd.cid = 0;
2348 
2349 	for (i = 1; i < 3; i++) {
2350 		req[i].qpair = &qpair;
2351 		req[i].cmd = &cmd[i];
2352 		req[i].rsp = &rsp[i];
2353 		req[i].length = sizeof(data);
2354 		SPDK_IOV_ONE(req[i].iov, &req[i].iovcnt, &data, req[i].length);
2355 
2356 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2357 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2358 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2359 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2360 			spdk_nvme_bytes_to_numd(req[i].length);
2361 		cmd[i].nvme_cmd.cid = i;
2362 	}
2363 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2364 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
2365 
2366 	/* consume the pending event */
2367 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2368 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2369 	event.raw = rsp[0].nvme_cpl.cdw0;
2370 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2371 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2372 
2373 	/* get log with RAE set */
2374 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2375 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2376 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2377 
2378 	/* will not generate new event until RAE is clear */
2379 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2380 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2381 
2382 	/* get log with RAE clear */
2383 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2384 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2385 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2386 
2387 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2388 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2389 
2390 	cleanup_pending_async_events(&ctrlr);
2391 }
2392 
2393 static void
2394 test_nvmf_ctrlr_create_destruct(void)
2395 {
2396 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2397 	struct spdk_nvmf_poll_group group = {};
2398 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2399 	struct spdk_nvmf_transport transport = {};
2400 	struct spdk_nvmf_transport_ops tops = {};
2401 	struct spdk_nvmf_subsystem subsystem = {};
2402 	struct spdk_nvmf_request req = {};
2403 	struct spdk_nvmf_qpair qpair = {};
2404 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2405 	struct spdk_nvmf_tgt tgt = {};
2406 	union nvmf_h2c_msg cmd = {};
2407 	union nvmf_c2h_msg rsp = {};
2408 	const uint8_t hostid[16] = {
2409 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2410 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2411 	};
2412 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2413 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2414 
2415 	group.thread = spdk_get_thread();
2416 	transport.ops = &tops;
2417 	transport.opts.max_aq_depth = 32;
2418 	transport.opts.max_queue_depth = 64;
2419 	transport.opts.max_qpairs_per_ctrlr = 3;
2420 	transport.opts.dif_insert_or_strip = true;
2421 	transport.tgt = &tgt;
2422 	qpair.transport = &transport;
2423 	qpair.group = &group;
2424 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2425 	TAILQ_INIT(&qpair.outstanding);
2426 
2427 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2428 	connect_data.cntlid = 0xFFFF;
2429 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2430 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2431 
2432 	subsystem.thread = spdk_get_thread();
2433 	subsystem.id = 1;
2434 	TAILQ_INIT(&subsystem.ctrlrs);
2435 	subsystem.tgt = &tgt;
2436 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2437 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2438 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2439 
2440 	group.sgroups = sgroups;
2441 
2442 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2443 	cmd.connect_cmd.cid = 1;
2444 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2445 	cmd.connect_cmd.recfmt = 0;
2446 	cmd.connect_cmd.qid = 0;
2447 	cmd.connect_cmd.sqsize = 31;
2448 	cmd.connect_cmd.cattr = 0;
2449 	cmd.connect_cmd.kato = 120000;
2450 
2451 	req.qpair = &qpair;
2452 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2453 	req.length = sizeof(connect_data);
2454 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
2455 	req.cmd = &cmd;
2456 	req.rsp = &rsp;
2457 
2458 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2459 	sgroups[subsystem.id].mgmt_io_outstanding++;
2460 
2461 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base);
2462 	poll_threads();
2463 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2464 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2465 	CU_ASSERT(ctrlr->subsys == &subsystem);
2466 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2467 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2468 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2469 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2470 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2471 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2472 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2473 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
2474 	CU_ASSERT(!strncmp((void *)&ctrlr->hostid, hostid, 16));
2475 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2476 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2477 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2478 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
2479 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2480 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2481 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2482 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2483 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2484 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2485 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2486 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2487 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2488 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2489 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2490 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2491 
2492 	ctrlr->in_destruct = true;
2493 	nvmf_ctrlr_destruct(ctrlr);
2494 	poll_threads();
2495 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2496 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2497 }
2498 
2499 static void
2500 test_nvmf_ctrlr_use_zcopy(void)
2501 {
2502 	struct spdk_nvmf_subsystem subsystem = {};
2503 	struct spdk_nvmf_transport transport = {};
2504 	struct spdk_nvmf_request req = {};
2505 	struct spdk_nvmf_qpair qpair = {};
2506 	struct spdk_nvmf_ctrlr ctrlr = {};
2507 	union nvmf_h2c_msg cmd = {};
2508 	struct spdk_nvmf_ns ns = {};
2509 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2510 	struct spdk_bdev bdev = {};
2511 	struct spdk_nvmf_poll_group group = {};
2512 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2513 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2514 	struct spdk_io_channel io_ch = {};
2515 	int opc;
2516 
2517 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2518 	ns.bdev = &bdev;
2519 
2520 	subsystem.id = 0;
2521 	subsystem.max_nsid = 1;
2522 	subsys_ns[0] = &ns;
2523 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2524 
2525 	ctrlr.subsys = &subsystem;
2526 
2527 	transport.opts.zcopy = true;
2528 
2529 	qpair.ctrlr = &ctrlr;
2530 	qpair.group = &group;
2531 	qpair.qid = 1;
2532 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2533 	qpair.transport = &transport;
2534 
2535 	group.thread = spdk_get_thread();
2536 	group.num_sgroups = 1;
2537 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2538 	sgroups.num_ns = 1;
2539 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2540 	ns_info.channel = &io_ch;
2541 	sgroups.ns_info = &ns_info;
2542 	TAILQ_INIT(&sgroups.queued);
2543 	group.sgroups = &sgroups;
2544 	TAILQ_INIT(&qpair.outstanding);
2545 
2546 	req.qpair = &qpair;
2547 	req.cmd = &cmd;
2548 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2549 
2550 	/* Admin queue */
2551 	qpair.qid = 0;
2552 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2553 	qpair.qid = 1;
2554 
2555 	/* Invalid Opcodes */
2556 	for (opc = 0; opc <= 255; opc++) {
2557 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2558 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2559 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2560 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2561 		}
2562 	}
2563 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2564 
2565 	/* Fused WRITE */
2566 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2567 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2568 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2569 
2570 	/* Non bdev */
2571 	cmd.nvme_cmd.nsid = 4;
2572 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2573 	cmd.nvme_cmd.nsid = 1;
2574 
2575 	/* ZCOPY Not supported */
2576 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2577 	ns.zcopy = true;
2578 
2579 	/* ZCOPY disabled on transport level */
2580 	transport.opts.zcopy = false;
2581 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2582 	transport.opts.zcopy = true;
2583 
2584 	/* Success */
2585 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2586 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2587 }
2588 
2589 static void
2590 qpair_state_change_done(void *cb_arg, int status)
2591 {
2592 }
2593 
2594 static void
2595 test_spdk_nvmf_request_zcopy_start(void)
2596 {
2597 	struct spdk_nvmf_request req = {};
2598 	struct spdk_nvmf_qpair qpair = {};
2599 	struct spdk_nvmf_transport transport = {};
2600 	struct spdk_nvme_cmd cmd = {};
2601 	union nvmf_c2h_msg rsp = {};
2602 	struct spdk_nvmf_ctrlr ctrlr = {};
2603 	struct spdk_nvmf_subsystem subsystem = {};
2604 	struct spdk_nvmf_ns ns = {};
2605 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2606 	enum spdk_nvme_ana_state ana_state[1];
2607 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2608 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2609 
2610 	struct spdk_nvmf_poll_group group = {};
2611 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2612 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2613 	struct spdk_io_channel io_ch = {};
2614 
2615 	ns.bdev = &bdev;
2616 	ns.zcopy = true;
2617 	ns.anagrpid = 1;
2618 
2619 	subsystem.id = 0;
2620 	subsystem.max_nsid = 1;
2621 	subsys_ns[0] = &ns;
2622 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2623 
2624 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2625 
2626 	/* Enable controller */
2627 	ctrlr.vcprop.cc.bits.en = 1;
2628 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2629 	ctrlr.listener = &listener;
2630 
2631 	transport.opts.zcopy = true;
2632 
2633 	group.thread = spdk_get_thread();
2634 	group.num_sgroups = 1;
2635 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2636 	sgroups.num_ns = 1;
2637 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2638 	ns_info.channel = &io_ch;
2639 	sgroups.ns_info = &ns_info;
2640 	TAILQ_INIT(&sgroups.queued);
2641 	group.sgroups = &sgroups;
2642 	TAILQ_INIT(&qpair.outstanding);
2643 
2644 	qpair.ctrlr = &ctrlr;
2645 	qpair.group = &group;
2646 	qpair.transport = &transport;
2647 	qpair.qid = 1;
2648 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2649 
2650 	cmd.nsid = 1;
2651 
2652 	req.qpair = &qpair;
2653 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2654 	req.rsp = &rsp;
2655 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2656 	cmd.opc = SPDK_NVME_OPC_READ;
2657 
2658 	/* Fail because no controller */
2659 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2660 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2661 	qpair.ctrlr = NULL;
2662 	spdk_nvmf_request_zcopy_start(&req);
2663 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2664 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2665 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
2666 	qpair.ctrlr = &ctrlr;
2667 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2668 
2669 	/* Fail because bad NSID */
2670 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2671 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2672 	cmd.nsid = 0;
2673 	spdk_nvmf_request_zcopy_start(&req);
2674 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2675 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2676 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2677 	cmd.nsid = 1;
2678 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2679 
2680 	/* Fail because bad Channel */
2681 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2682 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2683 	ns_info.channel = NULL;
2684 	spdk_nvmf_request_zcopy_start(&req);
2685 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2686 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2687 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2688 	ns_info.channel = &io_ch;
2689 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2690 
2691 	/* Queue the requet because NSID is not active */
2692 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2693 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2694 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2695 	spdk_nvmf_request_zcopy_start(&req);
2696 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
2697 	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
2698 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2699 	TAILQ_REMOVE(&sgroups.queued, &req, link);
2700 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2701 
2702 	/* Fail because QPair is not active */
2703 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2704 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2705 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2706 	qpair.state_cb = qpair_state_change_done;
2707 	spdk_nvmf_request_zcopy_start(&req);
2708 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
2709 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2710 	qpair.state_cb = NULL;
2711 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2712 
2713 	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails */
2714 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2715 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2716 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2717 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2718 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
2719 	spdk_nvmf_request_zcopy_start(&req);
2720 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2721 	cmd.cdw10 = 0;
2722 	cmd.cdw12 = 0;
2723 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2724 
2725 	/* Success */
2726 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2727 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2728 	spdk_nvmf_request_zcopy_start(&req);
2729 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2730 }
2731 
2732 static void
2733 test_zcopy_read(void)
2734 {
2735 	struct spdk_nvmf_request req = {};
2736 	struct spdk_nvmf_qpair qpair = {};
2737 	struct spdk_nvmf_transport transport = {};
2738 	struct spdk_nvme_cmd cmd = {};
2739 	union nvmf_c2h_msg rsp = {};
2740 	struct spdk_nvmf_ctrlr ctrlr = {};
2741 	struct spdk_nvmf_subsystem subsystem = {};
2742 	struct spdk_nvmf_ns ns = {};
2743 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2744 	enum spdk_nvme_ana_state ana_state[1];
2745 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2746 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2747 
2748 	struct spdk_nvmf_poll_group group = {};
2749 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2750 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2751 	struct spdk_io_channel io_ch = {};
2752 
2753 	ns.bdev = &bdev;
2754 	ns.zcopy = true;
2755 	ns.anagrpid = 1;
2756 
2757 	subsystem.id = 0;
2758 	subsystem.max_nsid = 1;
2759 	subsys_ns[0] = &ns;
2760 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2761 
2762 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2763 
2764 	/* Enable controller */
2765 	ctrlr.vcprop.cc.bits.en = 1;
2766 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2767 	ctrlr.listener = &listener;
2768 
2769 	transport.opts.zcopy = true;
2770 
2771 	group.thread = spdk_get_thread();
2772 	group.num_sgroups = 1;
2773 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2774 	sgroups.num_ns = 1;
2775 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2776 	ns_info.channel = &io_ch;
2777 	sgroups.ns_info = &ns_info;
2778 	TAILQ_INIT(&sgroups.queued);
2779 	group.sgroups = &sgroups;
2780 	TAILQ_INIT(&qpair.outstanding);
2781 
2782 	qpair.ctrlr = &ctrlr;
2783 	qpair.group = &group;
2784 	qpair.transport = &transport;
2785 	qpair.qid = 1;
2786 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2787 
2788 	cmd.nsid = 1;
2789 
2790 	req.qpair = &qpair;
2791 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2792 	req.rsp = &rsp;
2793 	cmd.opc = SPDK_NVME_OPC_READ;
2794 
2795 	/* Prepare for zcopy */
2796 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2797 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2798 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2799 	CU_ASSERT(ns_info.io_outstanding == 0);
2800 
2801 	/* Perform the zcopy start */
2802 	spdk_nvmf_request_zcopy_start(&req);
2803 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2804 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2805 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2806 	CU_ASSERT(ns_info.io_outstanding == 1);
2807 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2808 
2809 	/* Perform the zcopy end */
2810 	spdk_nvmf_request_zcopy_end(&req, false);
2811 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2812 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2813 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2814 	CU_ASSERT(ns_info.io_outstanding == 0);
2815 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2816 }
2817 
2818 static void
2819 test_zcopy_write(void)
2820 {
2821 	struct spdk_nvmf_request req = {};
2822 	struct spdk_nvmf_qpair qpair = {};
2823 	struct spdk_nvmf_transport transport = {};
2824 	struct spdk_nvme_cmd cmd = {};
2825 	union nvmf_c2h_msg rsp = {};
2826 	struct spdk_nvmf_ctrlr ctrlr = {};
2827 	struct spdk_nvmf_subsystem subsystem = {};
2828 	struct spdk_nvmf_ns ns = {};
2829 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2830 	enum spdk_nvme_ana_state ana_state[1];
2831 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2832 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2833 
2834 	struct spdk_nvmf_poll_group group = {};
2835 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2836 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2837 	struct spdk_io_channel io_ch = {};
2838 
2839 	ns.bdev = &bdev;
2840 	ns.zcopy = true;
2841 	ns.anagrpid = 1;
2842 
2843 	subsystem.id = 0;
2844 	subsystem.max_nsid = 1;
2845 	subsys_ns[0] = &ns;
2846 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2847 
2848 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2849 
2850 	/* Enable controller */
2851 	ctrlr.vcprop.cc.bits.en = 1;
2852 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2853 	ctrlr.listener = &listener;
2854 
2855 	transport.opts.zcopy = true;
2856 
2857 	group.thread = spdk_get_thread();
2858 	group.num_sgroups = 1;
2859 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2860 	sgroups.num_ns = 1;
2861 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2862 	ns_info.channel = &io_ch;
2863 	sgroups.ns_info = &ns_info;
2864 	TAILQ_INIT(&sgroups.queued);
2865 	group.sgroups = &sgroups;
2866 	TAILQ_INIT(&qpair.outstanding);
2867 
2868 	qpair.ctrlr = &ctrlr;
2869 	qpair.group = &group;
2870 	qpair.transport = &transport;
2871 	qpair.qid = 1;
2872 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2873 
2874 	cmd.nsid = 1;
2875 
2876 	req.qpair = &qpair;
2877 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2878 	req.rsp = &rsp;
2879 	cmd.opc = SPDK_NVME_OPC_WRITE;
2880 
2881 	/* Prepare for zcopy */
2882 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2883 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2884 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2885 	CU_ASSERT(ns_info.io_outstanding == 0);
2886 
2887 	/* Perform the zcopy start */
2888 	spdk_nvmf_request_zcopy_start(&req);
2889 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2890 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2891 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2892 	CU_ASSERT(ns_info.io_outstanding == 1);
2893 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2894 
2895 	/* Perform the zcopy end */
2896 	spdk_nvmf_request_zcopy_end(&req, true);
2897 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2898 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2899 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2900 	CU_ASSERT(ns_info.io_outstanding == 0);
2901 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2902 }
2903 
2904 static void
2905 test_nvmf_property_set(void)
2906 {
2907 	int rc;
2908 	struct spdk_nvmf_request req = {};
2909 	struct spdk_nvmf_qpair qpair = {};
2910 	struct spdk_nvmf_ctrlr ctrlr = {};
2911 	union nvmf_h2c_msg cmd = {};
2912 	union nvmf_c2h_msg rsp = {};
2913 
2914 	req.qpair = &qpair;
2915 	qpair.ctrlr = &ctrlr;
2916 	req.cmd = &cmd;
2917 	req.rsp = &rsp;
2918 
2919 	/* Invalid parameters */
2920 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2921 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
2922 
2923 	rc = nvmf_property_set(&req);
2924 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2925 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2926 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2927 
2928 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);
2929 
2930 	rc = nvmf_property_get(&req);
2931 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2932 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2933 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2934 
2935 	/* Set cc with same property size */
2936 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2937 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);
2938 
2939 	rc = nvmf_property_set(&req);
2940 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2941 
2942 	/* Emulate cc data */
2943 	ctrlr.vcprop.cc.raw = 0xDEADBEEF;
2944 
2945 	rc = nvmf_property_get(&req);
2946 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2947 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);
2948 
2949 	/* Set asq with different property size */
2950 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2951 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2952 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);
2953 
2954 	rc = nvmf_property_set(&req);
2955 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2956 
2957 	/* Emulate asq data */
2958 	ctrlr.vcprop.asq = 0xAADDADBEEF;
2959 
2960 	rc = nvmf_property_get(&req);
2961 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2962 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
2963 }
2964 
2965 static void
2966 test_nvmf_ctrlr_get_features_host_behavior_support(void)
2967 {
2968 	int rc;
2969 	struct spdk_nvmf_request req = {};
2970 	struct spdk_nvmf_qpair qpair = {};
2971 	struct spdk_nvmf_ctrlr ctrlr = {};
2972 	struct spdk_nvme_host_behavior behavior = {};
2973 	union nvmf_h2c_msg cmd = {};
2974 	union nvmf_c2h_msg rsp = {};
2975 
2976 	qpair.ctrlr = &ctrlr;
2977 	req.qpair = &qpair;
2978 	req.cmd = &cmd;
2979 	req.rsp = &rsp;
2980 
2981 	/* Invalid data */
2982 	req.length = sizeof(struct spdk_nvme_host_behavior);
2983 	req.iovcnt = 0;
2984 
2985 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2986 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2987 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2988 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2989 
2990 	/* Wrong structure length */
2991 	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;
2992 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length);
2993 
2994 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2995 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2996 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2997 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2998 
2999 	/* Get Features Host Behavior Support Success */
3000 	req.length = sizeof(struct spdk_nvme_host_behavior);
3001 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length);
3002 
3003 	ctrlr.acre_enabled = true;
3004 	behavior.acre = false;
3005 
3006 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
3007 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3008 	CU_ASSERT(behavior.acre == true);
3009 }
3010 
3011 static void
3012 test_nvmf_ctrlr_set_features_host_behavior_support(void)
3013 {
3014 	int rc;
3015 	struct spdk_nvmf_request req = {};
3016 	struct spdk_nvmf_qpair qpair = {};
3017 	struct spdk_nvmf_ctrlr ctrlr = {};
3018 	struct spdk_nvme_host_behavior host_behavior = {};
3019 	union nvmf_h2c_msg cmd = {};
3020 	union nvmf_c2h_msg rsp = {};
3021 
3022 	qpair.ctrlr = &ctrlr;
3023 	req.qpair = &qpair;
3024 	req.cmd = &cmd;
3025 	req.rsp = &rsp;
3026 	req.iov[0].iov_base = &host_behavior;
3027 	req.iov[0].iov_len = sizeof(host_behavior);
3028 
3029 	/* Invalid iovcnt */
3030 	req.iovcnt = 0;
3031 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3032 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3033 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3034 
3035 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3036 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3037 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3038 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3039 
3040 	/* Invalid iov_len */
3041 	req.iovcnt = 1;
3042 	req.iov[0].iov_len = 0;
3043 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3044 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3045 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3046 
3047 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3048 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3049 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3050 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3051 
3052 	/* acre is false */
3053 	host_behavior.acre = 0;
3054 	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
3055 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3056 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3057 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3058 
3059 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3060 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3061 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3062 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
3063 	CU_ASSERT(ctrlr.acre_enabled == false);
3064 
3065 	/* acre is true */
3066 	host_behavior.acre = 1;
3067 	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
3068 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3069 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3070 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3071 
3072 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3073 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3074 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3075 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
3076 	CU_ASSERT(ctrlr.acre_enabled == true);
3077 
3078 	/* Invalid acre */
3079 	host_behavior.acre = 2;
3080 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3081 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3082 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3083 
3084 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3085 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3086 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3087 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3088 }
3089 
3090 int
3091 main(int argc, char **argv)
3092 {
3093 	CU_pSuite	suite = NULL;
3094 	unsigned int	num_failures;
3095 
3096 	CU_initialize_registry();
3097 
3098 	suite = CU_add_suite("nvmf", NULL, NULL);
3099 	CU_ADD_TEST(suite, test_get_log_page);
3100 	CU_ADD_TEST(suite, test_process_fabrics_cmd);
3101 	CU_ADD_TEST(suite, test_connect);
3102 	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
3103 	CU_ADD_TEST(suite, test_identify_ns);
3104 	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
3105 	CU_ADD_TEST(suite, test_reservation_write_exclusive);
3106 	CU_ADD_TEST(suite, test_reservation_exclusive_access);
3107 	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
3108 	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
3109 	CU_ADD_TEST(suite, test_reservation_notification_log_page);
3110 	CU_ADD_TEST(suite, test_get_dif_ctx);
3111 	CU_ADD_TEST(suite, test_set_get_features);
3112 	CU_ADD_TEST(suite, test_identify_ctrlr);
3113 	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
3114 	CU_ADD_TEST(suite, test_custom_admin_cmd);
3115 	CU_ADD_TEST(suite, test_fused_compare_and_write);
3116 	CU_ADD_TEST(suite, test_multi_async_event_reqs);
3117 	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
3118 	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
3119 	CU_ADD_TEST(suite, test_multi_async_events);
3120 	CU_ADD_TEST(suite, test_rae);
3121 	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
3122 	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
3123 	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
3124 	CU_ADD_TEST(suite, test_zcopy_read);
3125 	CU_ADD_TEST(suite, test_zcopy_write);
3126 	CU_ADD_TEST(suite, test_nvmf_property_set);
3127 	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
3128 	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);
3129 
3130 	allocate_threads(1);
3131 	set_thread(0);
3132 
3133 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
3134 	CU_cleanup_registry();
3135 
3136 	free_threads();
3137 
3138 	return num_failures;
3139 }
3140