/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/bdev_zone.h"
#include "spdk/nvme_spec.h"
#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

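/*
 * Minimal stand-in for struct spdk_bdev: the real definition lives in the bdev
 * library, but this test only needs the fields that the code under test reads
 * (block count, block size, and the zoned-namespace attributes exercised below).
 */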
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
	bool zoned;
	uint32_t zone_size;
	uint32_t max_open_zones;
	uint32_t max_active_zones;
};

#define MAX_OPEN_ZONES 12
#define MAX_ACTIVE_ZONES 34
#define ZONE_SIZE 56

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

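/*
 * Sentinel bdev_io pointers: nvmf_bdev_ctrlr_zcopy_start() below stores one of
 * these in req->zcopy_bdev_io so that tests can tell whether a zcopy start was
 * treated as a read, a write, or an unsupported opcode. The values are never
 * dereferenced.
 */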
static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

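/*
 * DEFINE_STUB(fn, ret, args, val) from spdk_internal/mock.h generates a mock
 * implementation of fn that returns val by default; DEFINE_STUB_V is the
 * void-returning variant. Individual tests can override a stub's return value
 * at runtime, e.g.:
 *
 *	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
 *	...
 *	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
 *
 * as test_connect() does below.
 */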
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
	    MAX_ACTIVE_ZONES);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

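/*
 * Hand-rolled replacement for the real nvmf_bdev_ctrlr_identify_ns(): it
 * advertises a single LBA format of 512 bytes (lbads = log2(512) = 9) and
 * sizes the namespace straight from the mock bdev's block count.
 */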
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

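/*
 * subsystem->ns is a sparse array indexed by (nsid - 1); NULL entries are
 * inactive namespaces, so the iterator skips them until it finds the next
 * populated slot or runs past max_nsid.
 */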
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

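/*
 * Mock zcopy start: decodes the command the same way the real bdev layer
 * would. The starting LBA occupies CDW10/CDW11 as one little-endian 64-bit
 * value, and NLB in CDW12 bits 15:0 is zero-based, hence the "+ 1". On
 * success it plants one of the sentinel bdev_io pointers declared above.
 */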
int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

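/*
 * Get Log Page tests: NUMDL carries the low 16 bits of the zero-based dword
 * count, so spdk_nvme_bytes_to_numd(req.length) encodes the 4 KiB buffer.
 * The log page offset (CDW12/CDW13) must be dword aligned, which is why an
 * offset of 2 is rejected below.
 */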
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.length = sizeof(data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &data, req.length);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.iovcnt = 0;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

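	/*
	 * SQSIZE in the connect command is zero-based: 31 requests a 32-entry
	 * queue, which exactly matches max_aq_depth above. The "invalid sqsize"
	 * cases later bump it to 32 (admin) and 64 (I/O) to overflow the limits.
	 */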
	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.length = sizeof(connect_data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

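	/*
	 * For SPDK_NVMF_FABRIC_SC_INVALID_PARAM responses, iattr = 1 means the
	 * offending field lives in the connect *data* (ipo 16 = CNTLID,
	 * 256 = SUBNQN, 512 = HOSTNQN), while iattr = 0 points into the SQE
	 * itself (ipo 42 = QID, 44 = SQSIZE).
	 */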
	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* Admin connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Admin connect to discovery controller with keep-alive-timeout == 0.
	 * In this case a fixed default keep-alive timeout is applied instead.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, false);

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with qid that is too large */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* The first attempt will detect the duplicate QID and schedule a retry,
	 * so for now the response should still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now advance the clock, so that the retry poller executes. */
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with temporarily duplicate queue ID. This covers a race
	 * where the qpair_mask bit may not yet be cleared, even though the
	 * initiator has closed the connection.  See issue #2955. */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* The first attempt will detect the duplicate QID and schedule a retry,
	 * so for now the response should still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now clear the stale QID bit and advance the clock, so that the retry poller succeeds. */
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

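/*
 * Identify Namespace Identification Descriptor list: each entry is a 4-byte
 * header (NIDT, NIDL, two reserved bytes) followed by NIDL bytes of the
 * identifier, which is why the first data byte of each descriptor sits at
 * offset header + 4 in the checks below.
 */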
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.length = sizeof(buf);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

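/*
 * Identify Namespace against a sparse namespace map: NSID 2 is within
 * max_nsid but its slot is NULL (inactive), which should return success with
 * zeroed data rather than an error.
 */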
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

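/*
 * ZNS I/O command set Identify Namespace: MAR and MOR are reported zero-based,
 * hence the MAX_*_ZONES - 1 checks, and zsze is the zone size in logical
 * blocks taken straight from the mock bdev.
 */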
static void
test_identify_ns_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ns_data nsdata = {};
	struct spdk_bdev bdev[2] = {
		{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE,
		 .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES},
		{.blockcnt = 5678}
	};
	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* Invalid ZNS NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid ZNS NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1);
	CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1);
	CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1);
	CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE);
	nsdata.ozcs.read_across_zone_boundaries = 0;
	nsdata.mar = 0;
	nsdata.mor = 0;
	nsdata.lbafe[0].zsze = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* Valid NVM NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NVM NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

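/*
 * Temperature Threshold feature: TMPSEL (CDW11 bits 19:16) values 9-15 and
 * THSEL (bits 21:20) value 0x3 are reserved, so the "invalid" cases below set
 * exactly those encodings and expect SPDK_NVME_SC_INVALID_FIELD.
 */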
static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

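/*
 * Write Exclusive (type 1): only the reservation holder may write; any host,
 * registered or not, may still read. The cases below check a registrant
 * (Host B) and, after unregistering, a non-registrant (Host C).
 */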
1323 static void
1324 test_reservation_write_exclusive(void)
1325 {
1326 	struct spdk_nvmf_request req = {};
1327 	union nvmf_h2c_msg cmd = {};
1328 	union nvmf_c2h_msg rsp = {};
1329 	int rc;
1330 
1331 	req.cmd = &cmd;
1332 	req.rsp = &rsp;
1333 
1334 	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
1335 	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1336 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1337 
1338 	/* Test Case: Issue a Read command from Host A and Host B */
1339 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1340 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1341 	SPDK_CU_ASSERT_FATAL(rc == 0);
1342 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1343 	SPDK_CU_ASSERT_FATAL(rc == 0);
1344 
1345 	/* Test Case: Issue a DSM Write command from Host A and Host B */
1346 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1347 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1348 	SPDK_CU_ASSERT_FATAL(rc == 0);
1349 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1350 	SPDK_CU_ASSERT_FATAL(rc < 0);
1351 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1352 
1353 	/* Test Case: Issue a Write command from Host C */
1354 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1355 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1356 	SPDK_CU_ASSERT_FATAL(rc < 0);
1357 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1358 
1359 	/* Test Case: Issue a Read command from Host B */
1360 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1361 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1362 	SPDK_CU_ASSERT_FATAL(rc == 0);
1363 
1364 	/* Unregister Host C */
1365 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1366 
1367 	/* Test Case: Read and Write commands from non-registrant Host C */
1368 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1369 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1370 	SPDK_CU_ASSERT_FATAL(rc < 0);
1371 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1372 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1373 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1374 	SPDK_CU_ASSERT_FATAL(rc == 0);
1375 }
1376 
1377 static void
1378 test_reservation_exclusive_access(void)
1379 {
1380 	struct spdk_nvmf_request req = {};
1381 	union nvmf_h2c_msg cmd = {};
1382 	union nvmf_c2h_msg rsp = {};
1383 	int rc;
1384 
1385 	req.cmd = &cmd;
1386 	req.rsp = &rsp;
1387 
1388 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1389 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1390 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1391 
1392 	/* Test Case: Issue a Read command from Host B */
1393 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1394 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1395 	SPDK_CU_ASSERT_FATAL(rc < 0);
1396 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1397 
1398 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1399 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1400 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1401 	SPDK_CU_ASSERT_FATAL(rc == 0);
1402 }
1403 
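/*
 * Write Exclusive - Registrants Only / All Registrants (types 3 and 5):
 * every registrant may write and reads are never blocked; a host that
 * unregisters keeps read access but loses write access.
 */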
1404 static void
1405 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1406 {
1407 	struct spdk_nvmf_request req = {};
1408 	union nvmf_h2c_msg cmd = {};
1409 	union nvmf_c2h_msg rsp = {};
1410 	int rc;
1411 
1412 	req.cmd = &cmd;
1413 	req.rsp = &rsp;
1414 
1415 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1416 	ut_reservation_init(rtype);
1417 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1418 
1419 	/* Test Case: Issue a Read command from Host A and Host C */
1420 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1421 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1422 	SPDK_CU_ASSERT_FATAL(rc == 0);
1423 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1424 	SPDK_CU_ASSERT_FATAL(rc == 0);
1425 
1426 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1427 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1428 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1429 	SPDK_CU_ASSERT_FATAL(rc == 0);
1430 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1431 	SPDK_CU_ASSERT_FATAL(rc == 0);
1432 
1433 	/* Unregister Host C */
1434 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1435 
1436 	/* Test Case: Read and Write commands from non-registrant Host C */
1437 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1438 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1439 	SPDK_CU_ASSERT_FATAL(rc == 0);
1440 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1441 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1442 	SPDK_CU_ASSERT_FATAL(rc < 0);
1443 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1444 }
1445 
1446 static void
1447 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1448 {
1449 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1450 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1451 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1452 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1453 }
1454 
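/*
 * Exclusive Access - Registrants Only / All Registrants (types 4 and 6):
 * registrants may both read and write; an unregistered host may do
 * neither.
 */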
1455 static void
1456 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1457 {
1458 	struct spdk_nvmf_request req = {};
1459 	union nvmf_h2c_msg cmd = {};
1460 	union nvmf_c2h_msg rsp = {};
1461 	int rc;
1462 
1463 	req.cmd = &cmd;
1464 	req.rsp = &rsp;
1465 
1466 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1467 	ut_reservation_init(rtype);
1468 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1469 
1470 	/* Test Case: Issue a Write command from Host B */
1471 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1472 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1473 	SPDK_CU_ASSERT_FATAL(rc == 0);
1474 
1475 	/* Unregister Host B */
1476 	spdk_uuid_set_null(&g_ns_info.reg_hostid[1]);
1477 
1478 	/* Test Case: Issue a Read command from Host B */
1479 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1480 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1481 	SPDK_CU_ASSERT_FATAL(rc < 0);
1482 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1483 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1484 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1485 	SPDK_CU_ASSERT_FATAL(rc < 0);
1486 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1487 }
1488 
1489 static void
1490 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1491 {
1492 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1493 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1494 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1495 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1496 }
1497 
1498 static void
1499 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1500 {
1501 	STAILQ_INIT(&ctrlr->async_events);
1502 }
1503 
1504 static void
1505 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1506 {
1507 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1508 
1509 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1510 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1511 		free(event);
1512 	}
1513 }
1514 
1515 static int
1516 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1517 {
1518 	int num = 0;
1519 	struct spdk_nvmf_async_event_completion *event;
1520 
1521 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1522 		num++;
1523 	}
1524 	return num;
1525 }
1526 
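/*
 * Reservation notifications may be masked per namespace via ns->mask.
 * When unmasked, each event appends a Reservation Notification log page
 * entry and, if an AER is outstanding, completes it with
 * SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL.
 */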
1527 static void
1528 test_reservation_notification_log_page(void)
1529 {
1530 	struct spdk_nvmf_ctrlr ctrlr;
1531 	struct spdk_nvmf_qpair qpair;
1532 	struct spdk_nvmf_ns ns;
1533 	struct spdk_nvmf_request req = {};
1534 	union nvmf_h2c_msg cmd = {};
1535 	union nvmf_c2h_msg rsp = {};
1536 	union spdk_nvme_async_event_completion event = {};
1537 	struct spdk_nvme_reservation_notification_log logs[3];
1538 	struct iovec iov;
1539 
1540 	memset(&ctrlr, 0, sizeof(ctrlr));
1541 	ctrlr.thread = spdk_get_thread();
1542 	TAILQ_INIT(&ctrlr.log_head);
1543 	init_pending_async_events(&ctrlr);
1544 	ns.nsid = 1;
1545 
1546 	/* Test Case: Mask all the reservation notifications */
1547 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1548 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1549 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1550 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1551 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1552 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1553 					  SPDK_NVME_RESERVATION_RELEASED);
1554 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1555 					  SPDK_NVME_RESERVATION_PREEMPTED);
1556 	poll_threads();
1557 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1558 
1559 	/* Test Case: Unmask all the reservation notifications;
1560 	 * 3 log pages are generated and an AER is triggered.
1561 	 */
1562 	ns.mask = 0;
1563 	ctrlr.num_avail_log_pages = 0;
1564 	req.cmd = &cmd;
1565 	req.rsp = &rsp;
1566 	ctrlr.aer_req[0] = &req;
1567 	ctrlr.nr_aer_reqs = 1;
1568 	req.qpair = &qpair;
1569 	TAILQ_INIT(&qpair.outstanding);
1570 	qpair.ctrlr = NULL;
1571 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1572 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1573 
1574 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1575 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1576 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1577 					  SPDK_NVME_RESERVATION_RELEASED);
1578 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1579 					  SPDK_NVME_RESERVATION_PREEMPTED);
1580 	poll_threads();
1581 	event.raw = rsp.nvme_cpl.cdw0;
1582 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1583 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1584 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1585 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1586 
1587 	/* Test Case: Get Log Page to clear the log pages */
1588 	iov.iov_base = &logs[0];
1589 	iov.iov_len = sizeof(logs);
1590 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1591 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1592 
1593 	cleanup_pending_async_events(&ctrlr);
1594 }
1595 
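/*
 * spdk_nvmf_request_get_dif_ctx() only yields a DIF context when DIF
 * insert/strip is enabled, the qpair is active and is an I/O queue, the
 * command is a data-carrying NVM opcode (not Fabrics), and the NSID maps
 * to a valid namespace; the asserts below walk each early-out in turn.
 */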
1596 static void
1597 test_get_dif_ctx(void)
1598 {
1599 	struct spdk_nvmf_subsystem subsystem = {};
1600 	struct spdk_nvmf_request req = {};
1601 	struct spdk_nvmf_qpair qpair = {};
1602 	struct spdk_nvmf_ctrlr ctrlr = {};
1603 	struct spdk_nvmf_ns ns = {};
1604 	struct spdk_nvmf_ns *_ns = NULL;
1605 	struct spdk_bdev bdev = {};
1606 	union nvmf_h2c_msg cmd = {};
1607 	struct spdk_dif_ctx dif_ctx = {};
1608 	bool ret;
1609 
1610 	ctrlr.subsys = &subsystem;
1611 
1612 	qpair.ctrlr = &ctrlr;
1613 
1614 	req.qpair = &qpair;
1615 	req.cmd = &cmd;
1616 
1617 	ns.bdev = &bdev;
1618 
1619 	ctrlr.dif_insert_or_strip = false;
1620 
1621 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1622 	CU_ASSERT(ret == false);
1623 
1624 	ctrlr.dif_insert_or_strip = true;
1625 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1626 
1627 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1628 	CU_ASSERT(ret == false);
1629 
1630 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1631 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1632 
1633 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1634 	CU_ASSERT(ret == false);
1635 
1636 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1637 
1638 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1639 	CU_ASSERT(ret == false);
1640 
1641 	qpair.qid = 1;
1642 
1643 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1644 	CU_ASSERT(ret == false);
1645 
1646 	cmd.nvme_cmd.nsid = 1;
1647 
1648 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1649 	CU_ASSERT(ret == false);
1650 
1651 	subsystem.max_nsid = 1;
1652 	subsystem.ns = &_ns;
1653 	subsystem.ns[0] = &ns;
1654 
1655 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1656 	CU_ASSERT(ret == false);
1657 
1658 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1659 
1660 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1661 	CU_ASSERT(ret == true);
1662 }
1663 
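/*
 * IOCCSZ is reported in 16-byte units.  The expected value mirrors the
 * calculation asserted below:
 *
 *     ioccsz = sizeof(struct spdk_nvme_cmd) / 16      (64 / 16 = 4)
 *            + in_capsule_data_size / 16              (4096 / 16 = 256)
 */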
1664 static void
1665 test_identify_ctrlr(void)
1666 {
1667 	struct spdk_nvmf_tgt tgt = {};
1668 	struct spdk_nvmf_subsystem subsystem = {
1669 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1670 		.tgt = &tgt,
1671 	};
1672 	struct spdk_nvmf_transport_ops tops = {};
1673 	struct spdk_nvmf_transport transport = {
1674 		.ops = &tops,
1675 		.opts = {
1676 			.in_capsule_data_size = 4096,
1677 		},
1678 	};
1679 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1680 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1681 	struct spdk_nvme_ctrlr_data cdata = {};
1682 	uint32_t expected_ioccsz;
1683 
1684 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1685 
1686 	/* Check ioccsz, TCP transport */
1687 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1688 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1689 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1690 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1691 
1692 	/* Check ioccsz, RDMA transport */
1693 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1694 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1695 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1696 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1697 
1698 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1699 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1700 	ctrlr.dif_insert_or_strip = true;
1701 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1702 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1703 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1704 }
1705 
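/*
 * ZASL and WZSL are powers of two in units of the minimum memory page
 * size (CAP.MPSMIN), while DMRSL counts logical blocks and DMRL counts
 * ranges.  The asserted values are consistent with this sketch of the
 * math (not necessarily the exact implementation):
 *
 *     zasl = log2(max_zone_append_size_kib >> (12 + mpsmin))
 *
 * e.g. 60000 KiB with mpsmin = 0 gives log2(14) = 3, and with
 * mpsmin = 2 gives log2(3) = 1; zasl = 0 means no limit is reported.
 */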
1706 static void
1707 test_identify_ctrlr_iocs_specific(void)
1708 {
1709 	struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 };
1710 	struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 };
1711 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop };
1712 	struct spdk_nvme_cmd cmd = {};
1713 	struct spdk_nvme_cpl rsp = {};
1714 	struct spdk_nvme_zns_ctrlr_data ctrlr_data = {};
1715 	struct spdk_nvme_nvm_ctrlr_data cdata_nvm = {};
1716 
1717 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;
1718 
1719 	/* ZNS max_zone_append_size_kib no limit */
1720 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1721 	memset(&rsp, 0, sizeof(rsp));
1722 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1723 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1724 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1725 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1726 	CU_ASSERT(ctrlr_data.zasl == 0);
1727 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1728 
1729 	/* ZNS max_zone_append_size_kib = 4096 */
1730 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1731 	memset(&rsp, 0, sizeof(rsp));
1732 	subsystem.max_zone_append_size_kib = 4096;
1733 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1734 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1735 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1736 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1737 	CU_ASSERT(ctrlr_data.zasl == 0);
1738 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1739 
1740 	/* ZNS max_zone_append_size_kib = 60000 */
1741 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1742 	memset(&rsp, 0, sizeof(rsp));
1743 	subsystem.max_zone_append_size_kib = 60000;
1744 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1745 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1746 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1747 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1748 	CU_ASSERT(ctrlr_data.zasl == 3);
1749 	ctrlr_data.zasl = 0;
1750 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1751 
1752 	/* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */
1753 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1754 	memset(&rsp, 0, sizeof(rsp));
1755 	ctrlr.vcprop.cap.bits.mpsmin = 2;
1756 	subsystem.max_zone_append_size_kib = 60000;
1757 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1758 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1759 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1760 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1761 	CU_ASSERT(ctrlr_data.zasl == 1);
1762 	ctrlr_data.zasl = 0;
1763 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1764 	ctrlr.vcprop.cap.bits.mpsmin = 0;
1765 
1766 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;
1767 
1768 	/* NVM max_discard_size_kib = 1024;
1769 	 * max_write_zeroes_size_kib = 1024;
1770 	 * mpsmin = 0;
1771 	 */
1772 	memset(&cdata_nvm, 0xFF, sizeof(cdata_nvm));
1773 	memset(&rsp, 0, sizeof(rsp));
1774 	subsystem.max_discard_size_kib = (uint64_t)1024;
1775 	subsystem.max_write_zeroes_size_kib = (uint64_t)1024;
1776 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1777 			&cdata_nvm, sizeof(cdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1778 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1779 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1780 	CU_ASSERT(cdata_nvm.wzsl == 8);
1781 	CU_ASSERT(cdata_nvm.dmrsl == 2048);
1782 	CU_ASSERT(cdata_nvm.dmrl == 1);
1783 }
1784 
1785 static int
1786 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1787 {
1788 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1789 
1790 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1791 }
1792 
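/*
 * Custom admin command handlers are registered per opcode with
 * spdk_nvmf_set_custom_admin_cmd_hdlr() and invoked from
 * nvmf_ctrlr_process_admin_cmd(); the vendor-specific opcode 0xc1 below
 * has no built-in handler.
 */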
1793 static void
1794 test_custom_admin_cmd(void)
1795 {
1796 	struct spdk_nvmf_subsystem subsystem;
1797 	struct spdk_nvmf_qpair qpair;
1798 	struct spdk_nvmf_ctrlr ctrlr;
1799 	struct spdk_nvmf_request req;
1800 	struct spdk_nvmf_ns *ns_ptrs[1];
1801 	struct spdk_nvmf_ns ns;
1802 	union nvmf_h2c_msg cmd;
1803 	union nvmf_c2h_msg rsp;
1804 	struct spdk_bdev bdev;
1805 	uint8_t buf[4096];
1806 	int rc;
1807 
1808 	memset(&subsystem, 0, sizeof(subsystem));
1809 	ns_ptrs[0] = &ns;
1810 	subsystem.ns = ns_ptrs;
1811 	subsystem.max_nsid = 1;
1812 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1813 
1814 	memset(&ns, 0, sizeof(ns));
1815 	ns.opts.nsid = 1;
1816 	ns.bdev = &bdev;
1817 
1818 	memset(&qpair, 0, sizeof(qpair));
1819 	qpair.ctrlr = &ctrlr;
1820 
1821 	memset(&ctrlr, 0, sizeof(ctrlr));
1822 	ctrlr.subsys = &subsystem;
1823 	ctrlr.vcprop.cc.bits.en = 1;
1824 	ctrlr.thread = spdk_get_thread();
1825 
1826 	memset(&req, 0, sizeof(req));
1827 	req.qpair = &qpair;
1828 	req.cmd = &cmd;
1829 	req.rsp = &rsp;
1830 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1831 	req.length = sizeof(buf);
1832 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);
1833 
1834 	memset(&cmd, 0, sizeof(cmd));
1835 	cmd.nvme_cmd.opc = 0xc1;
1836 	cmd.nvme_cmd.nsid = 0;
1837 	memset(&rsp, 0, sizeof(rsp));
1838 
1839 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1840 
1841 	/* Ensure that our handler is called */
1842 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1843 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1844 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1845 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1846 }
1847 
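/*
 * A fused compare-and-write must arrive as a Compare marked FUSE_FIRST
 * immediately followed by a Write marked FUSE_SECOND on the same queue.
 * Any other opcode or ordering is rejected, and a dangling first half
 * is dropped.
 */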
1848 static void
1849 test_fused_compare_and_write(void)
1850 {
1851 	struct spdk_nvmf_request req = {};
1852 	struct spdk_nvmf_qpair qpair = {};
1853 	struct spdk_nvme_cmd cmd = {};
1854 	union nvmf_c2h_msg rsp = {};
1855 	struct spdk_nvmf_ctrlr ctrlr = {};
1856 	struct spdk_nvmf_subsystem subsystem = {};
1857 	struct spdk_nvmf_ns ns = {};
1858 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1859 	enum spdk_nvme_ana_state ana_state[1];
1860 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1861 	struct spdk_bdev bdev = {};
1862 
1863 	struct spdk_nvmf_poll_group group = {};
1864 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1865 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1866 	struct spdk_io_channel io_ch = {};
1867 
1868 	ns.bdev = &bdev;
1869 	ns.anagrpid = 1;
1870 
1871 	subsystem.id = 0;
1872 	subsystem.max_nsid = 1;
1873 	subsys_ns[0] = &ns;
1874 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1875 
1876 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1877 
1878 	/* Enable controller */
1879 	ctrlr.vcprop.cc.bits.en = 1;
1880 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1881 	ctrlr.listener = &listener;
1882 
1883 	group.num_sgroups = 1;
1884 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1885 	sgroups.num_ns = 1;
1886 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1887 	ns_info.channel = &io_ch;
1888 	sgroups.ns_info = &ns_info;
1889 	TAILQ_INIT(&sgroups.queued);
1890 	group.sgroups = &sgroups;
1891 	TAILQ_INIT(&qpair.outstanding);
1892 
1893 	qpair.ctrlr = &ctrlr;
1894 	qpair.group = &group;
1895 	qpair.qid = 1;
1896 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1897 
1898 	cmd.nsid = 1;
1899 
1900 	req.qpair = &qpair;
1901 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1902 	req.rsp = &rsp;
1903 
1904 	/* Valid sequence: Compare as FUSE_FIRST then Write as FUSE_SECOND */
1905 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1906 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1907 
1908 	spdk_nvmf_request_exec(&req);
1909 	CU_ASSERT(qpair.first_fused_req != NULL);
1910 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1911 
1912 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1913 	cmd.opc = SPDK_NVME_OPC_WRITE;
1914 
1915 	spdk_nvmf_request_exec(&req);
1916 	CU_ASSERT(qpair.first_fused_req == NULL);
1917 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1918 
1919 	/* Wrong sequence */
1920 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1921 	cmd.opc = SPDK_NVME_OPC_WRITE;
1922 
1923 	spdk_nvmf_request_exec(&req);
1924 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1925 	CU_ASSERT(qpair.first_fused_req == NULL);
1926 
1927 	/* Write as FUSE_FIRST (Wrong op code) */
1928 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1929 	cmd.opc = SPDK_NVME_OPC_WRITE;
1930 
1931 	spdk_nvmf_request_exec(&req);
1932 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1933 	CU_ASSERT(qpair.first_fused_req == NULL);
1934 
1935 	/* Compare as FUSE_SECOND (Wrong op code): start a valid fused pair, then send Compare instead of Write */
1936 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1937 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1938 
1939 	spdk_nvmf_request_exec(&req);
1940 	CU_ASSERT(qpair.first_fused_req != NULL);
1941 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1942 
1943 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1944 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1945 
1946 	spdk_nvmf_request_exec(&req);
1947 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1948 	CU_ASSERT(qpair.first_fused_req == NULL);
1949 }
1950 
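/*
 * The controller accepts at most SPDK_NVMF_MAX_ASYNC_EVENTS outstanding
 * AER requests; one more completes immediately with
 * ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, and aborting an AER by cid
 * compacts the remaining aer_req array.
 */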
1951 static void
1952 test_multi_async_event_reqs(void)
1953 {
1954 	struct spdk_nvmf_subsystem subsystem = {};
1955 	struct spdk_nvmf_qpair qpair = {};
1956 	struct spdk_nvmf_ctrlr ctrlr = {};
1957 	struct spdk_nvmf_request req[5] = {};
1958 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1959 	struct spdk_nvmf_ns ns = {};
1960 	union nvmf_h2c_msg cmd[5] = {};
1961 	union nvmf_c2h_msg rsp[5] = {};
1962 
1963 	struct spdk_nvmf_poll_group group = {};
1964 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1965 
1966 	int i;
1967 
1968 	ns_ptrs[0] = &ns;
1969 	subsystem.ns = ns_ptrs;
1970 	subsystem.max_nsid = 1;
1971 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1972 
1973 	ns.opts.nsid = 1;
1974 	group.sgroups = &sgroups;
1975 
1976 	qpair.ctrlr = &ctrlr;
1977 	qpair.group = &group;
1978 	TAILQ_INIT(&qpair.outstanding);
1979 
1980 	ctrlr.subsys = &subsystem;
1981 	ctrlr.vcprop.cc.bits.en = 1;
1982 	ctrlr.thread = spdk_get_thread();
1983 
1984 	for (i = 0; i < 5; i++) {
1985 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1986 		cmd[i].nvme_cmd.nsid = 1;
1987 		cmd[i].nvme_cmd.cid = i;
1988 
1989 		req[i].qpair = &qpair;
1990 		req[i].cmd = &cmd[i];
1991 		req[i].rsp = &rsp[i];
1992 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1993 	}
1994 
1995 	/* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */
1996 	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
1997 	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
1998 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1999 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
2000 	}
2001 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2002 
2003 	/* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */
2004 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2005 	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
2006 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2007 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
2008 
2009 	/* Test that the aer_req array stays contiguous when a request in the middle is aborted */
2010 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
2011 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2012 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2013 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
2014 
2015 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
2016 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2017 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2018 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
2019 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
2020 
2021 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
2022 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
2023 }
2024 
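/*
 * The ANA log page is a fixed header followed by one descriptor per ANA
 * group, each holding the group's state and its NSID list.  The tests
 * build the expected page by hand, then read the actual page back both
 * in 16-byte chunks and across a split iovec to exercise the offset
 * handling.
 */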
2025 static void
2026 test_get_ana_log_page_one_ns_per_anagrp(void)
2027 {
2028 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
2029 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
2030 	uint32_t ana_group[3];
2031 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
2032 	struct spdk_nvmf_ctrlr ctrlr = {};
2033 	enum spdk_nvme_ana_state ana_state[3];
2034 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2035 	struct spdk_nvmf_ns ns[3];
2036 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
2037 	uint64_t offset;
2038 	uint32_t length;
2039 	int i;
2040 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2041 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2042 	struct iovec iov, iovs[2];
2043 	struct spdk_nvme_ana_page *ana_hdr;
2044 	char _ana_desc[UT_ANA_DESC_SIZE];
2045 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2046 
2047 	subsystem.ns = ns_arr;
2048 	subsystem.max_nsid = 3;
2049 	for (i = 0; i < 3; i++) {
2050 		subsystem.ana_group[i] = 1;
2051 	}
2052 	ctrlr.subsys = &subsystem;
2053 	ctrlr.listener = &listener;
2054 
2055 	for (i = 0; i < 3; i++) {
2056 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2057 	}
2058 
2059 	for (i = 0; i < 3; i++) {
2060 		ns_arr[i]->nsid = i + 1;
2061 		ns_arr[i]->anagrpid = i + 1;
2062 	}
2063 
2064 	/* create expected page */
2065 	ana_hdr = (void *)&expected_page[0];
2066 	ana_hdr->num_ana_group_desc = 3;
2067 	ana_hdr->change_count = 0;
2068 
2069 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2070 	ana_desc = (void *)_ana_desc;
2071 	offset = sizeof(struct spdk_nvme_ana_page);
2072 
2073 	for (i = 0; i < 3; i++) {
2074 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
2075 		ana_desc->ana_group_id = ns_arr[i]->nsid;
2076 		ana_desc->num_of_nsid = 1;
2077 		ana_desc->change_count = 0;
2078 		ana_desc->ana_state = ctrlr.listener->ana_state[i];
2079 		ana_desc->nsid[0] = ns_arr[i]->nsid;
2080 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
2081 		offset += UT_ANA_DESC_SIZE;
2082 	}
2083 
2084 	/* read entire actual log page */
2085 	offset = 0;
2086 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2087 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2088 		iov.iov_base = &actual_page[offset];
2089 		iov.iov_len = length;
2090 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2091 		offset += length;
2092 	}
2093 
2094 	/* compare expected page and actual page */
2095 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2096 
2097 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2098 	offset = 0;
2099 	iovs[0].iov_base = &actual_page[offset];
2100 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2101 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2102 	iovs[1].iov_base = &actual_page[offset];
2103 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
2104 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2105 
2106 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2107 
2108 #undef UT_ANA_DESC_SIZE
2109 #undef UT_ANA_LOG_PAGE_SIZE
2110 }
2111 
2112 static void
2113 test_get_ana_log_page_multi_ns_per_anagrp(void)
2114 {
2115 #define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
2116 				 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
2117 				 sizeof(uint32_t) * 5)
2118 	struct spdk_nvmf_ns ns[5];
2119 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
2120 	uint32_t ana_group[5] = {0};
2121 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
2122 	enum spdk_nvme_ana_state ana_state[5];
2123 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
2124 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
2125 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2126 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2127 	struct iovec iov, iovs[2];
2128 	struct spdk_nvme_ana_page *ana_hdr;
2129 	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
2130 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2131 	uint64_t offset;
2132 	uint32_t length;
2133 	int i;
2134 
2135 	subsystem.max_nsid = 5;
2136 	subsystem.ana_group[1] = 3;
2137 	subsystem.ana_group[2] = 2;
2138 	for (i = 0; i < 5; i++) {
2139 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2140 	}
2141 
2142 	for (i = 0; i < 5; i++) {
2143 		ns_arr[i]->nsid = i + 1;
2144 	}
2145 	ns_arr[0]->anagrpid = 2;
2146 	ns_arr[1]->anagrpid = 3;
2147 	ns_arr[2]->anagrpid = 2;
2148 	ns_arr[3]->anagrpid = 3;
2149 	ns_arr[4]->anagrpid = 2;
2150 
2151 	/* create expected page */
2152 	ana_hdr = (void *)&expected_page[0];
2153 	ana_hdr->num_ana_group_desc = 2;
2154 	ana_hdr->change_count = 0;
2155 
2156 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2157 	ana_desc = (void *)_ana_desc;
2158 	offset = sizeof(struct spdk_nvme_ana_page);
2159 
2160 	memset(_ana_desc, 0, sizeof(_ana_desc));
2161 	ana_desc->ana_group_id = 2;
2162 	ana_desc->num_of_nsid = 3;
2163 	ana_desc->change_count = 0;
2164 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2165 	ana_desc->nsid[0] = 1;
2166 	ana_desc->nsid[1] = 3;
2167 	ana_desc->nsid[2] = 5;
2168 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2169 	       sizeof(uint32_t) * 3);
2170 	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;
2171 
2172 	memset(_ana_desc, 0, sizeof(_ana_desc));
2173 	ana_desc->ana_group_id = 3;
2174 	ana_desc->num_of_nsid = 2;
2175 	ana_desc->change_count = 0;
2176 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2177 	ana_desc->nsid[0] = 2;
2178 	ana_desc->nsid[1] = 4;
2179 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2180 	       sizeof(uint32_t) * 2);
2181 
2182 	/* read entire actual log page, and compare expected page and actual page. */
2183 	offset = 0;
2184 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2185 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2186 		iov.iov_base = &actual_page[offset];
2187 		iov.iov_len = length;
2188 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2189 		offset += length;
2190 	}
2191 
2192 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2193 
2194 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2195 	offset = 0;
2196 	iovs[0].iov_base = &actual_page[offset];
2197 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2198 	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2199 	iovs[1].iov_base = &actual_page[offset];
2200 	iovs[1].iov_len = sizeof(uint32_t) * 5;
2201 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2202 
2203 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2204 
2205 #undef UT_ANA_LOG_PAGE_SIZE
2206 }
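
/*
 * Events that arrive with no AER outstanding are queued; subsequent AER
 * requests then complete immediately in arrival order, and an AER with
 * no pending event stays outstanding.
 */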
2207 static void
2208 test_multi_async_events(void)
2209 {
2210 	struct spdk_nvmf_subsystem subsystem = {};
2211 	struct spdk_nvmf_qpair qpair = {};
2212 	struct spdk_nvmf_ctrlr ctrlr = {};
2213 	struct spdk_nvmf_request req[4] = {};
2214 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2215 	struct spdk_nvmf_ns ns = {};
2216 	union nvmf_h2c_msg cmd[4] = {};
2217 	union nvmf_c2h_msg rsp[4] = {};
2218 	union spdk_nvme_async_event_completion event = {};
2219 	struct spdk_nvmf_poll_group group = {};
2220 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2221 	int i;
2222 
2223 	ns_ptrs[0] = &ns;
2224 	subsystem.ns = ns_ptrs;
2225 	subsystem.max_nsid = 1;
2226 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2227 
2228 	ns.opts.nsid = 1;
2229 	group.sgroups = &sgroups;
2230 
2231 	qpair.ctrlr = &ctrlr;
2232 	qpair.group = &group;
2233 	TAILQ_INIT(&qpair.outstanding);
2234 
2235 	ctrlr.subsys = &subsystem;
2236 	ctrlr.vcprop.cc.bits.en = 1;
2237 	ctrlr.thread = spdk_get_thread();
2238 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2239 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2240 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2241 	init_pending_async_events(&ctrlr);
2242 
2243 	/* The target queues pending events when there is no outstanding AER request */
2244 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2245 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2246 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2247 
2248 	for (i = 0; i < 4; i++) {
2249 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2250 		cmd[i].nvme_cmd.nsid = 1;
2251 		cmd[i].nvme_cmd.cid = i;
2252 
2253 		req[i].qpair = &qpair;
2254 		req[i].cmd = &cmd[i];
2255 		req[i].rsp = &rsp[i];
2256 
2257 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2258 
2259 		sgroups.mgmt_io_outstanding = 1;
2260 		if (i < 3) {
2261 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2262 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2263 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2264 		} else {
2265 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2266 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2267 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2268 		}
2269 	}
2270 
2271 	event.raw = rsp[0].nvme_cpl.cdw0;
2272 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2273 	event.raw = rsp[1].nvme_cpl.cdw0;
2274 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2275 	event.raw = rsp[2].nvme_cpl.cdw0;
2276 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2277 
2278 	cleanup_pending_async_events(&ctrlr);
2279 }
2280 
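/*
 * RAE (Retain Asynchronous Event) in Get Log Page controls event
 * clearing: while an event is outstanding, duplicates are suppressed;
 * reading the log with RAE set keeps that state, while reading it with
 * RAE clear re-arms the controller to queue the next event.
 */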
2281 static void
2282 test_rae(void)
2283 {
2284 	struct spdk_nvmf_subsystem subsystem = {};
2285 	struct spdk_nvmf_qpair qpair = {};
2286 	struct spdk_nvmf_ctrlr ctrlr = {};
2287 	struct spdk_nvmf_request req[3] = {};
2288 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2289 	struct spdk_nvmf_ns ns = {};
2290 	union nvmf_h2c_msg cmd[3] = {};
2291 	union nvmf_c2h_msg rsp[3] = {};
2292 	union spdk_nvme_async_event_completion event = {};
2293 	struct spdk_nvmf_poll_group group = {};
2294 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2295 	int i;
2296 	char data[4096];
2297 
2298 	ns_ptrs[0] = &ns;
2299 	subsystem.ns = ns_ptrs;
2300 	subsystem.max_nsid = 1;
2301 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2302 
2303 	ns.opts.nsid = 1;
2304 	group.sgroups = &sgroups;
2305 
2306 	qpair.ctrlr = &ctrlr;
2307 	qpair.group = &group;
2308 	TAILQ_INIT(&qpair.outstanding);
2309 
2310 	ctrlr.subsys = &subsystem;
2311 	ctrlr.vcprop.cc.bits.en = 1;
2312 	ctrlr.thread = spdk_get_thread();
2313 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2314 	init_pending_async_events(&ctrlr);
2315 
2316 	/* The target queues pending events when there is no outstanding AER request */
2317 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2318 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2319 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2320 	/* Only one event is queued until RAE is cleared */
2321 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2322 
2323 	req[0].qpair = &qpair;
2324 	req[0].cmd = &cmd[0];
2325 	req[0].rsp = &rsp[0];
2326 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2327 	cmd[0].nvme_cmd.nsid = 1;
2328 	cmd[0].nvme_cmd.cid = 0;
2329 
2330 	for (i = 1; i < 3; i++) {
2331 		req[i].qpair = &qpair;
2332 		req[i].cmd = &cmd[i];
2333 		req[i].rsp = &rsp[i];
2334 		req[i].length = sizeof(data);
2335 		SPDK_IOV_ONE(req[i].iov, &req[i].iovcnt, &data, req[i].length);
2336 
2337 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2338 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2339 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2340 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2341 			spdk_nvme_bytes_to_numd(req[i].length);
2342 		cmd[i].nvme_cmd.cid = i;
2343 	}
2344 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2345 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
2346 
2347 	/* consume the pending event */
2348 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2349 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2350 	event.raw = rsp[0].nvme_cpl.cdw0;
2351 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2352 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2353 
2354 	/* get log with RAE set */
2355 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2356 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2357 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2358 
2359 	/* No new event is generated until RAE is cleared */
2360 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2361 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2362 
2363 	/* get log with RAE clear */
2364 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2365 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2366 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2367 
2368 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2369 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2370 
2371 	cleanup_pending_async_events(&ctrlr);
2372 }
2373 
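/*
 * nvmf_ctrlr_create() consumes a Fabrics Connect command.  The new
 * controller inherits KATO and the transport limits (MQES is the
 * zero-based queue depth, 64 - 1 = 63 here) and starts disabled, with
 * CC and CSTS zeroed, until the host enables it.
 */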
2374 static void
2375 test_nvmf_ctrlr_create_destruct(void)
2376 {
2377 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2378 	struct spdk_nvmf_poll_group group = {};
2379 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2380 	struct spdk_nvmf_transport transport = {};
2381 	struct spdk_nvmf_transport_ops tops = {};
2382 	struct spdk_nvmf_subsystem subsystem = {};
2383 	struct spdk_nvmf_request req = {};
2384 	struct spdk_nvmf_qpair qpair = {};
2385 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2386 	struct spdk_nvmf_tgt tgt = {};
2387 	union nvmf_h2c_msg cmd = {};
2388 	union nvmf_c2h_msg rsp = {};
2389 	const uint8_t hostid[16] = {
2390 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2391 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2392 	};
2393 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2394 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2395 
2396 	group.thread = spdk_get_thread();
2397 	transport.ops = &tops;
2398 	transport.opts.max_aq_depth = 32;
2399 	transport.opts.max_queue_depth = 64;
2400 	transport.opts.max_qpairs_per_ctrlr = 3;
2401 	transport.opts.dif_insert_or_strip = true;
2402 	transport.tgt = &tgt;
2403 	qpair.transport = &transport;
2404 	qpair.group = &group;
2405 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2406 	TAILQ_INIT(&qpair.outstanding);
2407 
2408 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2409 	connect_data.cntlid = 0xFFFF;
2410 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2411 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2412 
2413 	subsystem.thread = spdk_get_thread();
2414 	subsystem.id = 1;
2415 	TAILQ_INIT(&subsystem.ctrlrs);
2416 	subsystem.tgt = &tgt;
2417 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2418 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2419 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2420 
2421 	group.sgroups = sgroups;
2422 
2423 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2424 	cmd.connect_cmd.cid = 1;
2425 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2426 	cmd.connect_cmd.recfmt = 0;
2427 	cmd.connect_cmd.qid = 0;
2428 	cmd.connect_cmd.sqsize = 31;
2429 	cmd.connect_cmd.cattr = 0;
2430 	cmd.connect_cmd.kato = 120000;
2431 
2432 	req.qpair = &qpair;
2433 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2434 	req.length = sizeof(connect_data);
2435 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
2436 	req.cmd = &cmd;
2437 	req.rsp = &rsp;
2438 
2439 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2440 	sgroups[subsystem.id].mgmt_io_outstanding++;
2441 
2442 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base);
2443 	poll_threads();
2444 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2445 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2446 	CU_ASSERT(ctrlr->subsys == &subsystem);
2447 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2448 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2449 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2450 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2451 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2452 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2453 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2454 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
2455 	CU_ASSERT(!memcmp((void *)&ctrlr->hostid, hostid, 16));
2456 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2457 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2458 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2459 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
2460 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2461 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2462 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2463 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2464 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2465 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2466 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2467 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2468 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2469 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2470 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2471 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2472 
2473 	ctrlr->in_destruct = true;
2474 	nvmf_ctrlr_destruct(ctrlr);
2475 	poll_threads();
2476 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2477 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2478 }
2479 
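/*
 * nvmf_ctrlr_use_zcopy() only allows zcopy for non-fused READ/WRITE
 * commands on an I/O queue that target a valid namespace whose bdev
 * supports zcopy, and only when zcopy is enabled at the transport level.
 */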
2480 static void
2481 test_nvmf_ctrlr_use_zcopy(void)
2482 {
2483 	struct spdk_nvmf_subsystem subsystem = {};
2484 	struct spdk_nvmf_transport transport = {};
2485 	struct spdk_nvmf_request req = {};
2486 	struct spdk_nvmf_qpair qpair = {};
2487 	struct spdk_nvmf_ctrlr ctrlr = {};
2488 	union nvmf_h2c_msg cmd = {};
2489 	struct spdk_nvmf_ns ns = {};
2490 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2491 	struct spdk_bdev bdev = {};
2492 	struct spdk_nvmf_poll_group group = {};
2493 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2494 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2495 	struct spdk_io_channel io_ch = {};
2496 	int opc;
2497 
2498 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2499 	ns.bdev = &bdev;
2500 
2501 	subsystem.id = 0;
2502 	subsystem.max_nsid = 1;
2503 	subsys_ns[0] = &ns;
2504 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2505 
2506 	ctrlr.subsys = &subsystem;
2507 
2508 	transport.opts.zcopy = true;
2509 
2510 	qpair.ctrlr = &ctrlr;
2511 	qpair.group = &group;
2512 	qpair.qid = 1;
2513 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2514 	qpair.transport = &transport;
2515 
2516 	group.thread = spdk_get_thread();
2517 	group.num_sgroups = 1;
2518 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2519 	sgroups.num_ns = 1;
2520 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2521 	ns_info.channel = &io_ch;
2522 	sgroups.ns_info = &ns_info;
2523 	TAILQ_INIT(&sgroups.queued);
2524 	group.sgroups = &sgroups;
2525 	TAILQ_INIT(&qpair.outstanding);
2526 
2527 	req.qpair = &qpair;
2528 	req.cmd = &cmd;
2529 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2530 
2531 	/* Admin queue */
2532 	qpair.qid = 0;
2533 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2534 	qpair.qid = 1;
2535 
2536 	/* Invalid Opcodes */
2537 	for (opc = 0; opc <= 255; opc++) {
2538 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2539 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2540 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2541 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2542 		}
2543 	}
2544 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2545 
2546 	/* Fused WRITE */
2547 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2548 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2549 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2550 
2551 	/* NSID with no backing namespace/bdev */
2552 	cmd.nvme_cmd.nsid = 4;
2553 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2554 	cmd.nvme_cmd.nsid = 1;
2555 
2556 	/* Zcopy not supported by the namespace */
2557 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2558 	ns.zcopy = true;
2559 
2560 	/* ZCOPY disabled on transport level */
2561 	transport.opts.zcopy = false;
2562 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2563 	transport.opts.zcopy = true;
2564 
2565 	/* Success */
2566 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2567 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2568 }
2569 
2570 static void
2571 qpair_state_change_done(void *cb_arg, int status)
2572 {
2573 }
2574 
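/*
 * spdk_nvmf_request_zcopy_start() fails the request in place (phase
 * INIT_FAILED) on a missing controller, bad NSID, missing channel,
 * inactive qpair, or a bdev start error; if the namespace is merely
 * paused, the request is queued on the subsystem poll group instead.
 */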
2575 static void
2576 test_spdk_nvmf_request_zcopy_start(void)
2577 {
2578 	struct spdk_nvmf_request req = {};
2579 	struct spdk_nvmf_qpair qpair = {};
2580 	struct spdk_nvmf_transport transport = {};
2581 	struct spdk_nvme_cmd cmd = {};
2582 	union nvmf_c2h_msg rsp = {};
2583 	struct spdk_nvmf_ctrlr ctrlr = {};
2584 	struct spdk_nvmf_subsystem subsystem = {};
2585 	struct spdk_nvmf_ns ns = {};
2586 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2587 	enum spdk_nvme_ana_state ana_state[1];
2588 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2589 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2590 
2591 	struct spdk_nvmf_poll_group group = {};
2592 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2593 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2594 	struct spdk_io_channel io_ch = {};
2595 
2596 	ns.bdev = &bdev;
2597 	ns.zcopy = true;
2598 	ns.anagrpid = 1;
2599 
2600 	subsystem.id = 0;
2601 	subsystem.max_nsid = 1;
2602 	subsys_ns[0] = &ns;
2603 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2604 
2605 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2606 
2607 	/* Enable controller */
2608 	ctrlr.vcprop.cc.bits.en = 1;
2609 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2610 	ctrlr.listener = &listener;
2611 
2612 	transport.opts.zcopy = true;
2613 
2614 	group.thread = spdk_get_thread();
2615 	group.num_sgroups = 1;
2616 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2617 	sgroups.num_ns = 1;
2618 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2619 	ns_info.channel = &io_ch;
2620 	sgroups.ns_info = &ns_info;
2621 	TAILQ_INIT(&sgroups.queued);
2622 	group.sgroups = &sgroups;
2623 	TAILQ_INIT(&qpair.outstanding);
2624 
2625 	qpair.ctrlr = &ctrlr;
2626 	qpair.group = &group;
2627 	qpair.transport = &transport;
2628 	qpair.qid = 1;
2629 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2630 
2631 	cmd.nsid = 1;
2632 
2633 	req.qpair = &qpair;
2634 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2635 	req.rsp = &rsp;
2636 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2637 	cmd.opc = SPDK_NVME_OPC_READ;
2638 
2639 	/* Fail because no controller */
2640 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2641 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2642 	qpair.ctrlr = NULL;
2643 	spdk_nvmf_request_zcopy_start(&req);
2644 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2645 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2646 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
2647 	qpair.ctrlr = &ctrlr;
2648 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2649 
2650 	/* Fail because bad NSID */
2651 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2652 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2653 	cmd.nsid = 0;
2654 	spdk_nvmf_request_zcopy_start(&req);
2655 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2656 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2657 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2658 	cmd.nsid = 1;
2659 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2660 
2661 	/* Fail because bad Channel */
2662 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2663 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2664 	ns_info.channel = NULL;
2665 	spdk_nvmf_request_zcopy_start(&req);
2666 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2667 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2668 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2669 	ns_info.channel = &io_ch;
2670 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2671 
2672 	/* Queue the request because the NSID is not active */
2673 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2674 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2675 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2676 	spdk_nvmf_request_zcopy_start(&req);
2677 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
2678 	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
2679 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2680 	TAILQ_REMOVE(&sgroups.queued, &req, link);
2681 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2682 
2683 	/* Fail because QPair is not active */
2684 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2685 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2686 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2687 	qpair.state_cb = qpair_state_change_done;
2688 	spdk_nvmf_request_zcopy_start(&req);
2689 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
2690 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2691 	qpair.state_cb = NULL;
2692 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2693 
2694 	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails */
2695 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2696 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2697 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2698 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2699 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
2700 	spdk_nvmf_request_zcopy_start(&req);
2701 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2702 	cmd.cdw10 = 0;
2703 	cmd.cdw12 = 0;
2704 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2705 
2706 	/* Success */
2707 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2708 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2709 	spdk_nvmf_request_zcopy_start(&req);
2710 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2711 }
2712 
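/*
 * A zcopy request moves from NVMF_ZCOPY_PHASE_INIT (eligibility) to
 * NVMF_ZCOPY_PHASE_EXECUTE once the bdev start succeeds, and to
 * NVMF_ZCOPY_PHASE_COMPLETE after spdk_nvmf_request_zcopy_end(); while
 * executing it stays on the qpair's outstanding list and counts against
 * ns_info.io_outstanding.
 */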
2713 static void
2714 test_zcopy_read(void)
2715 {
2716 	struct spdk_nvmf_request req = {};
2717 	struct spdk_nvmf_qpair qpair = {};
2718 	struct spdk_nvmf_transport transport = {};
2719 	struct spdk_nvme_cmd cmd = {};
2720 	union nvmf_c2h_msg rsp = {};
2721 	struct spdk_nvmf_ctrlr ctrlr = {};
2722 	struct spdk_nvmf_subsystem subsystem = {};
2723 	struct spdk_nvmf_ns ns = {};
2724 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2725 	enum spdk_nvme_ana_state ana_state[1];
2726 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2727 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2728 
2729 	struct spdk_nvmf_poll_group group = {};
2730 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2731 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2732 	struct spdk_io_channel io_ch = {};
2733 
2734 	ns.bdev = &bdev;
2735 	ns.zcopy = true;
2736 	ns.anagrpid = 1;
2737 
2738 	subsystem.id = 0;
2739 	subsystem.max_nsid = 1;
2740 	subsys_ns[0] = &ns;
2741 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2742 
2743 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2744 
2745 	/* Enable controller */
2746 	ctrlr.vcprop.cc.bits.en = 1;
2747 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2748 	ctrlr.listener = &listener;
2749 
2750 	transport.opts.zcopy = true;
2751 
2752 	group.thread = spdk_get_thread();
2753 	group.num_sgroups = 1;
2754 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2755 	sgroups.num_ns = 1;
2756 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2757 	ns_info.channel = &io_ch;
2758 	sgroups.ns_info = &ns_info;
2759 	TAILQ_INIT(&sgroups.queued);
2760 	group.sgroups = &sgroups;
2761 	TAILQ_INIT(&qpair.outstanding);
2762 
2763 	qpair.ctrlr = &ctrlr;
2764 	qpair.group = &group;
2765 	qpair.transport = &transport;
2766 	qpair.qid = 1;
2767 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2768 
2769 	cmd.nsid = 1;
2770 
2771 	req.qpair = &qpair;
2772 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2773 	req.rsp = &rsp;
2774 	cmd.opc = SPDK_NVME_OPC_READ;
2775 
2776 	/* Prepare for zcopy */
2777 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2778 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2779 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2780 	CU_ASSERT(ns_info.io_outstanding == 0);
2781 
2782 	/* Perform the zcopy start */
2783 	spdk_nvmf_request_zcopy_start(&req);
2784 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2785 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2786 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2787 	CU_ASSERT(ns_info.io_outstanding == 1);
2788 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2789 
2790 	/* Perform the zcopy end */
2791 	spdk_nvmf_request_zcopy_end(&req, false);
2792 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2793 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2794 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2795 	CU_ASSERT(ns_info.io_outstanding == 0);
2796 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2797 }
2798 
2799 static void
2800 test_zcopy_write(void)
2801 {
2802 	struct spdk_nvmf_request req = {};
2803 	struct spdk_nvmf_qpair qpair = {};
2804 	struct spdk_nvmf_transport transport = {};
2805 	struct spdk_nvme_cmd cmd = {};
2806 	union nvmf_c2h_msg rsp = {};
2807 	struct spdk_nvmf_ctrlr ctrlr = {};
2808 	struct spdk_nvmf_subsystem subsystem = {};
2809 	struct spdk_nvmf_ns ns = {};
2810 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2811 	enum spdk_nvme_ana_state ana_state[1];
2812 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2813 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2814 
2815 	struct spdk_nvmf_poll_group group = {};
2816 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2817 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2818 	struct spdk_io_channel io_ch = {};
2819 
2820 	ns.bdev = &bdev;
2821 	ns.zcopy = true;
2822 	ns.anagrpid = 1;
2823 
2824 	subsystem.id = 0;
2825 	subsystem.max_nsid = 1;
2826 	subsys_ns[0] = &ns;
2827 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2828 
2829 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2830 
2831 	/* Enable controller */
2832 	ctrlr.vcprop.cc.bits.en = 1;
2833 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2834 	ctrlr.listener = &listener;
2835 
2836 	transport.opts.zcopy = true;
2837 
2838 	group.thread = spdk_get_thread();
2839 	group.num_sgroups = 1;
2840 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2841 	sgroups.num_ns = 1;
2842 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2843 	ns_info.channel = &io_ch;
2844 	sgroups.ns_info = &ns_info;
2845 	TAILQ_INIT(&sgroups.queued);
2846 	group.sgroups = &sgroups;
2847 	TAILQ_INIT(&qpair.outstanding);
2848 
2849 	qpair.ctrlr = &ctrlr;
2850 	qpair.group = &group;
2851 	qpair.transport = &transport;
2852 	qpair.qid = 1;
2853 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2854 
2855 	cmd.nsid = 1;
2856 
2857 	req.qpair = &qpair;
2858 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2859 	req.rsp = &rsp;
2860 	cmd.opc = SPDK_NVME_OPC_WRITE;
2861 
2862 	/* Prepare for zcopy */
2863 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2864 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2865 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2866 	CU_ASSERT(ns_info.io_outstanding == 0);
2867 
2868 	/* Perform the zcopy start */
2869 	spdk_nvmf_request_zcopy_start(&req);
2870 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2871 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2872 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2873 	CU_ASSERT(ns_info.io_outstanding == 1);
2874 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2875 
2876 	/* Perform the zcopy end */
2877 	spdk_nvmf_request_zcopy_end(&req, true);
2878 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2879 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2880 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2881 	CU_ASSERT(ns_info.io_outstanding == 0);
2882 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2883 }
2884 
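/*
 * Fabrics Property Get/Set reject unsupported or read-only offsets with
 * INVALID_PARAM.  CC is a 4-byte property and ASQ an 8-byte one; a
 * 4-byte access to an 8-byte property covers only the low dword, e.g.
 * 0xAADDADBEEF reads back as 0xAADDADBEEF & 0xFFFFFFFF = 0xDDADBEEF.
 */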
2885 static void
2886 test_nvmf_property_set(void)
2887 {
2888 	int rc;
2889 	struct spdk_nvmf_request req = {};
2890 	struct spdk_nvmf_qpair qpair = {};
2891 	struct spdk_nvmf_ctrlr ctrlr = {};
2892 	union nvmf_h2c_msg cmd = {};
2893 	union nvmf_c2h_msg rsp = {};
2894 
2895 	req.qpair = &qpair;
2896 	qpair.ctrlr = &ctrlr;
2897 	req.cmd = &cmd;
2898 	req.rsp = &rsp;
2899 
2900 	/* Invalid parameters */
2901 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2902 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
2903 
2904 	rc = nvmf_property_set(&req);
2905 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2906 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2907 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2908 
2909 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);
2910 
2911 	rc = nvmf_property_get(&req);
2912 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2913 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2914 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2915 
	/* Set cc, whose 4-byte register size matches the property access size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate cc data written directly to the controller and verify that a
	 * property get returns it in full */
	ctrlr.vcprop.cc.raw = 0xDEADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);

	/* Set asq, an 8-byte property, with a 4-byte access; only the lower
	 * dword is written */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate asq data; a 4-byte get must likewise return only the lower
	 * dword of the register */
	ctrlr.vcprop.asq = 0xAADDADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
}

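/* Get Features - Host Behavior Support: reject a missing or short payload and
 * verify that the controller's current ACRE setting is copied out on success. */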
static void
test_nvmf_ctrlr_get_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Invalid payload: a transfer length is set but no iovec is provided */
	req.length = sizeof(struct spdk_nvme_host_behavior);
	req.iovcnt = 0;

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Wrong structure length */
	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length);

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Success: the controller's ACRE setting is copied into the host buffer */
	req.length = sizeof(struct spdk_nvme_host_behavior);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length);

	ctrlr.acre_enabled = true;
	behavior.acre = false;

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(behavior.acre == true);
}

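/* Set Features - Host Behavior Support: reject a missing or short payload and
 * an out-of-range acre value; otherwise apply acre to the controller. */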
static void
test_nvmf_ctrlr_set_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior host_behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.iov[0].iov_base = &host_behavior;
	req.iov[0].iov_len = sizeof(host_behavior);

	/* Invalid iovcnt; the completion status is reset before each call so a
	 * stale value cannot mask the result */
	req.iovcnt = 0;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Invalid iov_len */
	req.iovcnt = 1;
	req.iov[0].iov_len = 0;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* acre is false */
	host_behavior.acre = 0;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == false);

	/* acre is true */
	host_behavior.acre = 1;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == true);

	/* Invalid acre: only 0 and 1 are defined values */
	host_behavior.acre = 2;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);
	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);
	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
	CU_ADD_TEST(suite, test_multi_async_events);
	CU_ADD_TEST(suite, test_rae);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
	CU_ADD_TEST(suite, test_zcopy_read);
	CU_ADD_TEST(suite, test_zcopy_write);
	CU_ADD_TEST(suite, test_nvmf_property_set);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);

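	/* All tests run on a single mocked SPDK thread supplied by the shared
	 * ut_multithread helpers. */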
	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}