/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/bdev_zone.h"
#include "spdk/nvme_spec.h"
#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

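/* Minimal stand-in for the real struct spdk_bdev: nvmf/ctrlr.c is compiled
 * directly into this test, so only the fields that the code under test and
 * the stubs below actually touch need to exist.
 */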
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
	bool zoned;
	uint32_t zone_size;
	uint32_t max_open_zones;
	uint32_t max_active_zones;
};

const uint32_t MAX_OPEN_ZONES = 12;
const uint32_t MAX_ACTIVE_ZONES = 34;
const uint32_t ZONE_SIZE = 56;

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

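/* Sentinel bdev_io pointers handed out by the fake nvmf_bdev_ctrlr_zcopy_start()
 * below; tests compare req->zcopy_bdev_io against these to tell which opcode
 * path was taken. The values are arbitrary non-NULL addresses and are never
 * dereferenced.
 */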
static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

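/* DEFINE_STUB(fn, ret, args, val) and DEFINE_STUB_V(fn, args) come from
 * spdk_internal/mock.h: they emit mock implementations that return the given
 * default value, which individual tests can override at runtime with
 * MOCK_SET() and restore with MOCK_CLEAR().
 */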
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
	    MAX_ACTIVE_ZONES);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

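/* Simplified replacement for the real nvmf_bdev_ctrlr_identify_ns(): it reports
 * the bdev's block count and a single 512-byte LBA format, which is all the
 * Identify Namespace tests below rely on.
 */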
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

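/* Fake zcopy start: performs the same LBA-range bounds check as the real
 * implementation, then records one of the sentinel bdev_io pointers defined
 * above instead of submitting actual I/O.
 */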
int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

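/* Exercises nvmf_ctrlr_get_log_page(): a fully valid Get Log Page command,
 * then an invalid log ID, a non-dword-aligned offset, and a missing data
 * buffer, each of which must complete with Invalid Field.
 */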
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);
	spdk_iov_one(req.iov, &req.iovcnt, &data, req.length);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	req.iovcnt = 0;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* A fabrics command other than Connect on a qpair that has no controller
	 * yet must fail with COMMAND_SEQUENCE_ERROR.
	 */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

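/* Walks nvmf_ctrlr_cmd_connect() through admin and I/O queue connects: the
 * happy paths first, then each validation failure in turn (bad data length,
 * recfmt, unknown subsystem, unterminated/disallowed host, sqsize limits,
 * cntlid, discovery subsystem rules, CC settings, and qpair-mask limits).
 */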
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.length = sizeof(connect_data);
	spdk_iov_one(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* Admin queue connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Admin queue connect to discovery controller with keep-alive-timeout == 0.
	 * A fixed default timeout is then applied, so the keep-alive poller is
	 * still created.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

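/* Exercises the NS Identification Descriptor list (Identify CNS 03h). Each
 * descriptor is a 4-byte header (NIDT, NIDL, two reserved bytes) followed by
 * NIDL bytes of payload, which is why the assertions below index the buffer
 * at offsets 0/1/4, 12/13/16, and 32/33/36.
 */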
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);
	spdk_iov_one(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

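/* Covers spdk_nvmf_ctrlr_identify_ns(): invalid NSID 0, active namespaces,
 * an inactive (NULL) namespace slot that must return all-zero data, an NSID
 * past max_nsid, and the broadcast NSID 0xFFFFFFFF (rejected because namespace
 * management is not supported).
 */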
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

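/* Covers spdk_nvmf_ns_identify_iocs_specific(): ZNS-specific Identify data for
 * a zoned namespace (note that MAR and MOR are 0's based per the NVMe ZNS
 * spec, hence the "- 1" below), and all-zero data for the NVM command set and
 * for invalid NSIDs.
 */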
static void
test_identify_ns_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ns_data nsdata = {};
	struct spdk_bdev bdev[2] = {
		{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE,
		 .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES},
		{.blockcnt = 5678}
	};
	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* Invalid ZNS NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid ZNS NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1);
	CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1);
	CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1);
	CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE);
	nsdata.ozcs.read_across_zone_boundaries = 0;
	nsdata.mar = 0;
	nsdata.mor = 0;
	nsdata.lbafe[0].zsze = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* Valid NVM NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NVM NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

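/* Exercises the Set/Get Features handlers for reservation persistence (PTPL),
 * the temperature threshold (including reserved TMPSEL/THSEL values, which
 * must fail with Invalid Field), and error recovery, where attempting to
 * enable DULBE fails with Invalid Field but clearing it succeeds.
 */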
static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

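/* Populates g_ns_info to match the diagram above: all three hosts registered
 * on namespace 1, with the reservation type supplied by the caller. Individual
 * tests then pick a holder and issue commands through each host's controller.
 */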
static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has one controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has one controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}
1293 
1294 static void
1295 test_reservation_write_exclusive(void)
1296 {
1297 	struct spdk_nvmf_request req = {};
1298 	union nvmf_h2c_msg cmd = {};
1299 	union nvmf_c2h_msg rsp = {};
1300 	int rc;
1301 
1302 	req.cmd = &cmd;
1303 	req.rsp = &rsp;
1304 
1305 	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
1306 	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1307 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1308 
1309 	/* Test Case: Issue a Read command from Host A and Host B */
1310 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1311 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1312 	SPDK_CU_ASSERT_FATAL(rc == 0);
1313 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1314 	SPDK_CU_ASSERT_FATAL(rc == 0);
1315 
1316 	/* Test Case: Issue a DSM Write command from Host A and Host B */
1317 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1318 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1319 	SPDK_CU_ASSERT_FATAL(rc == 0);
1320 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1321 	SPDK_CU_ASSERT_FATAL(rc < 0);
1322 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1323 
1324 	/* Test Case: Issue a Write command from Host C */
1325 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1326 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1327 	SPDK_CU_ASSERT_FATAL(rc < 0);
1328 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1329 
1330 	/* Test Case: Issue a Read command from Host B */
1331 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1332 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1333 	SPDK_CU_ASSERT_FATAL(rc == 0);
1334 
1335 	/* Unregister Host C */
1336 	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
1337 
1338 	/* Test Case: Read and Write commands from non-registrant Host C */
1339 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1340 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1341 	SPDK_CU_ASSERT_FATAL(rc < 0);
1342 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1343 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1344 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1345 	SPDK_CU_ASSERT_FATAL(rc == 0);
1346 }
1347 
1348 static void
1349 test_reservation_exclusive_access(void)
1350 {
1351 	struct spdk_nvmf_request req = {};
1352 	union nvmf_h2c_msg cmd = {};
1353 	union nvmf_c2h_msg rsp = {};
1354 	int rc;
1355 
1356 	req.cmd = &cmd;
1357 	req.rsp = &rsp;
1358 
1359 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1360 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1361 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1362 
1363 	/* Test Case: Issue a Read command from Host B */
1364 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1365 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1366 	SPDK_CU_ASSERT_FATAL(rc < 0);
1367 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1368 
1369 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1370 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1371 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1372 	SPDK_CU_ASSERT_FATAL(rc == 0);
1373 }
1374 
1375 static void
1376 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1377 {
1378 	struct spdk_nvmf_request req = {};
1379 	union nvmf_h2c_msg cmd = {};
1380 	union nvmf_c2h_msg rsp = {};
1381 	int rc;
1382 
1383 	req.cmd = &cmd;
1384 	req.rsp = &rsp;
1385 
1386 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1387 	ut_reservation_init(rtype);
1388 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1389 
1390 	/* Test Case: Issue a Read command from Host A and Host C */
1391 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1392 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1393 	SPDK_CU_ASSERT_FATAL(rc == 0);
1394 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1395 	SPDK_CU_ASSERT_FATAL(rc == 0);
1396 
1397 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1398 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1399 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1400 	SPDK_CU_ASSERT_FATAL(rc == 0);
1401 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1402 	SPDK_CU_ASSERT_FATAL(rc == 0);
1403 
1404 	/* Unregister Host C */
1405 	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
1406 
1407 	/* Test Case: Read and Write commands from non-registrant Host C */
1408 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1409 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1410 	SPDK_CU_ASSERT_FATAL(rc == 0);
1411 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1412 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1413 	SPDK_CU_ASSERT_FATAL(rc < 0);
1414 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1415 }
1416 
1417 static void
1418 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1419 {
1420 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1421 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1422 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1423 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1424 }
1425 
1426 static void
1427 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1428 {
1429 	struct spdk_nvmf_request req = {};
1430 	union nvmf_h2c_msg cmd = {};
1431 	union nvmf_c2h_msg rsp = {};
1432 	int rc;
1433 
1434 	req.cmd = &cmd;
1435 	req.rsp = &rsp;
1436 
1437 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1438 	ut_reservation_init(rtype);
1439 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1440 
1441 	/* Test Case: Issue a Write command from Host B */
1442 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1443 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1444 	SPDK_CU_ASSERT_FATAL(rc == 0);
1445 
1446 	/* Unregister Host B */
1447 	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));
1448 
1449 	/* Test Case: Issue a Read command from Host B */
1450 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1451 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1452 	SPDK_CU_ASSERT_FATAL(rc < 0);
1453 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1454 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1455 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1456 	SPDK_CU_ASSERT_FATAL(rc < 0);
1457 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1458 }
1459 
1460 static void
1461 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1462 {
1463 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1464 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1465 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1466 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1467 }
1468 
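/* Helpers used by the AER tests below to initialize, drain, and count the
 * controller's queue of pending asynchronous events.
 */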
1469 static void
1470 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1471 {
1472 	STAILQ_INIT(&ctrlr->async_events);
1473 }
1474 
1475 static void
1476 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1477 {
1478 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1479 
1480 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1481 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1482 		free(event);
1483 	}
1484 }
1485 
1486 static int
1487 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1488 {
1489 	int num = 0;
1490 	struct spdk_nvmf_async_event_completion *event;
1491 
1492 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1493 		num++;
1494 	}
1495 	return num;
1496 }
1497 
1498 static void
1499 test_reservation_notification_log_page(void)
1500 {
1501 	struct spdk_nvmf_ctrlr ctrlr;
1502 	struct spdk_nvmf_qpair qpair;
1503 	struct spdk_nvmf_ns ns;
1504 	struct spdk_nvmf_request req = {};
1505 	union nvmf_h2c_msg cmd = {};
1506 	union nvmf_c2h_msg rsp = {};
1507 	union spdk_nvme_async_event_completion event = {};
1508 	struct spdk_nvme_reservation_notification_log logs[3];
1509 	struct iovec iov;
1510 
1511 	memset(&ctrlr, 0, sizeof(ctrlr));
1512 	ctrlr.thread = spdk_get_thread();
1513 	TAILQ_INIT(&ctrlr.log_head);
1514 	init_pending_async_events(&ctrlr);
1515 	ns.nsid = 1;
1516 
1517 	/* Test Case: Mask all the reservation notifications */
1518 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1519 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1520 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1521 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1522 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1523 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1524 					  SPDK_NVME_RESERVATION_RELEASED);
1525 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1526 					  SPDK_NVME_RESERVATION_PREEMPTED);
1527 	poll_threads();
1528 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1529 
1530 	/* Test Case: Unmask all the reservation notifications,
1531 	 * 3 log pages are generated, and an AER is triggered.
1532 	 */
1533 	ns.mask = 0;
1534 	ctrlr.num_avail_log_pages = 0;
1535 	req.cmd = &cmd;
1536 	req.rsp = &rsp;
1537 	ctrlr.aer_req[0] = &req;
1538 	ctrlr.nr_aer_reqs = 1;
1539 	req.qpair = &qpair;
1540 	TAILQ_INIT(&qpair.outstanding);
1541 	qpair.ctrlr = NULL;
1542 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1543 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1544 
1545 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1546 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1547 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1548 					  SPDK_NVME_RESERVATION_RELEASED);
1549 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1550 					  SPDK_NVME_RESERVATION_PREEMPTED);
1551 	poll_threads();
1552 	event.raw = rsp.nvme_cpl.cdw0;
1553 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1554 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1555 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1556 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1557 
1558 	/* Test Case: Get Log Page to clear the log pages */
1559 	iov.iov_base = &logs[0];
1560 	iov.iov_len = sizeof(logs);
1561 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1562 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1563 
1564 	cleanup_pending_async_events(&ctrlr);
1565 }
1566 
1567 static void
1568 test_get_dif_ctx(void)
1569 {
1570 	struct spdk_nvmf_subsystem subsystem = {};
1571 	struct spdk_nvmf_request req = {};
1572 	struct spdk_nvmf_qpair qpair = {};
1573 	struct spdk_nvmf_ctrlr ctrlr = {};
1574 	struct spdk_nvmf_ns ns = {};
1575 	struct spdk_nvmf_ns *_ns = NULL;
1576 	struct spdk_bdev bdev = {};
1577 	union nvmf_h2c_msg cmd = {};
1578 	struct spdk_dif_ctx dif_ctx = {};
1579 	bool ret;
1580 
1581 	ctrlr.subsys = &subsystem;
1582 
1583 	qpair.ctrlr = &ctrlr;
1584 
1585 	req.qpair = &qpair;
1586 	req.cmd = &cmd;
1587 
1588 	ns.bdev = &bdev;
1589 
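	/* Each step below satisfies one more prerequisite; the call is expected to
	 * keep returning false until a DIF-capable I/O command targets a valid
	 * namespace on an active I/O qpair with DIF insert/strip enabled.
	 */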
1590 	ctrlr.dif_insert_or_strip = false;
1591 
1592 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1593 	CU_ASSERT(ret == false);
1594 
1595 	ctrlr.dif_insert_or_strip = true;
1596 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1597 
1598 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1599 	CU_ASSERT(ret == false);
1600 
1601 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1602 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1603 
1604 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1605 	CU_ASSERT(ret == false);
1606 
1607 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1608 
1609 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1610 	CU_ASSERT(ret == false);
1611 
1612 	qpair.qid = 1;
1613 
1614 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1615 	CU_ASSERT(ret == false);
1616 
1617 	cmd.nvme_cmd.nsid = 1;
1618 
1619 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1620 	CU_ASSERT(ret == false);
1621 
1622 	subsystem.max_nsid = 1;
1623 	subsystem.ns = &_ns;
1624 	subsystem.ns[0] = &ns;
1625 
1626 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1627 	CU_ASSERT(ret == false);
1628 
1629 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1630 
1631 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1632 	CU_ASSERT(ret == true);
1633 }
1634 
1635 static void
1636 test_identify_ctrlr(void)
1637 {
1638 	struct spdk_nvmf_tgt tgt = {};
1639 	struct spdk_nvmf_subsystem subsystem = {
1640 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1641 		.tgt = &tgt,
1642 	};
1643 	struct spdk_nvmf_transport_ops tops = {};
1644 	struct spdk_nvmf_transport transport = {
1645 		.ops = &tops,
1646 		.opts = {
1647 			.in_capsule_data_size = 4096,
1648 		},
1649 	};
1650 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1651 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1652 	struct spdk_nvme_ctrlr_data cdata = {};
1653 	uint32_t expected_ioccsz;
1654 
1655 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1656 
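	/* IOCCSZ is expressed in 16-byte units and covers the SQE plus in-capsule
	 * data, hence sizeof(struct spdk_nvme_cmd) / 16 + in_capsule_data_size / 16.
	 */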
1657 	/* Check ioccsz, TCP transport */
1658 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1659 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1660 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1661 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1662 
1663 	/* Check ioccsz, RDMA transport */
1664 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1665 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1666 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1667 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1668 
1669 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1670 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1671 	ctrlr.dif_insert_or_strip = true;
1672 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1673 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1674 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1675 }
1676 
1677 static void
1678 test_identify_ctrlr_iocs_specific(void)
1679 {
1680 	struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 };
1681 	struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 };
1682 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop };
1683 	struct spdk_nvme_cmd cmd = {};
1684 	struct spdk_nvme_cpl rsp = {};
1685 	struct spdk_nvme_zns_ctrlr_data ctrlr_data = {};
1686 
1687 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;
1688 
1689 	/* ZNS max_zone_append_size_kib no limit */
1690 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1691 	memset(&rsp, 0, sizeof(rsp));
1692 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1693 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1694 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1695 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1696 	CU_ASSERT(ctrlr_data.zasl == 0);
1697 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1698 
1699 	/* ZNS max_zone_append_size_kib = 4096 */
1700 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1701 	memset(&rsp, 0, sizeof(rsp));
1702 	subsystem.max_zone_append_size_kib = 4096;
1703 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1704 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1705 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1706 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1707 	CU_ASSERT(ctrlr_data.zasl == 0);
1708 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1709 
1710 	/* ZNS max_zone_append_size_kib = 60000 */
1711 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1712 	memset(&rsp, 0, sizeof(rsp));
1713 	subsystem.max_zone_append_size_kib = 60000;
1714 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1715 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1716 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1717 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1718 	CU_ASSERT(ctrlr_data.zasl == 3);
1719 	ctrlr_data.zasl = 0;
1720 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1721 
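	/* ZASL is expressed relative to the minimum memory page size (CAP.MPSMIN),
	 * so the same 60000 KiB limit reports a smaller zasl once mpsmin is raised.
	 */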
1722 	/* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */
1723 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1724 	memset(&rsp, 0, sizeof(rsp));
1725 	ctrlr.vcprop.cap.bits.mpsmin = 2;
1726 	subsystem.max_zone_append_size_kib = 60000;
1727 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1728 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1729 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1730 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1731 	CU_ASSERT(ctrlr_data.zasl == 1);
1732 	ctrlr_data.zasl = 0;
1733 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1734 	ctrlr.vcprop.cap.bits.mpsmin = 0;
1735 
1736 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;
1737 
1738 	/* NVM */
1739 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1740 	memset(&rsp, 0, sizeof(rsp));
1741 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1742 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1743 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1744 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1745 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1746 }
1747 
1748 static int
1749 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1750 {
1751 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1752 
1753 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1754 }
1755 
1756 static void
1757 test_custom_admin_cmd(void)
1758 {
1759 	struct spdk_nvmf_subsystem subsystem;
1760 	struct spdk_nvmf_qpair qpair;
1761 	struct spdk_nvmf_ctrlr ctrlr;
1762 	struct spdk_nvmf_request req;
1763 	struct spdk_nvmf_ns *ns_ptrs[1];
1764 	struct spdk_nvmf_ns ns;
1765 	union nvmf_h2c_msg cmd;
1766 	union nvmf_c2h_msg rsp;
1767 	struct spdk_bdev bdev;
1768 	uint8_t buf[4096];
1769 	int rc;
1770 
1771 	memset(&subsystem, 0, sizeof(subsystem));
1772 	ns_ptrs[0] = &ns;
1773 	subsystem.ns = ns_ptrs;
1774 	subsystem.max_nsid = 1;
1775 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1776 
1777 	memset(&ns, 0, sizeof(ns));
1778 	ns.opts.nsid = 1;
1779 	ns.bdev = &bdev;
1780 
1781 	memset(&qpair, 0, sizeof(qpair));
1782 	qpair.ctrlr = &ctrlr;
1783 
1784 	memset(&ctrlr, 0, sizeof(ctrlr));
1785 	ctrlr.subsys = &subsystem;
1786 	ctrlr.vcprop.cc.bits.en = 1;
1787 	ctrlr.thread = spdk_get_thread();
1788 
1789 	memset(&req, 0, sizeof(req));
1790 	req.qpair = &qpair;
1791 	req.cmd = &cmd;
1792 	req.rsp = &rsp;
1793 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1794 	req.data = buf;
1795 	req.length = sizeof(buf);
1796 	spdk_iov_one(req.iov, &req.iovcnt, &buf, req.length);
1797 
1798 	memset(&cmd, 0, sizeof(cmd));
1799 	cmd.nvme_cmd.opc = 0xc1;
1800 	cmd.nvme_cmd.nsid = 0;
1801 	memset(&rsp, 0, sizeof(rsp));
1802 
1803 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1804 
1805 	/* Ensure that our custom handler is called */
1806 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1807 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1808 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1809 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1810 }
1811 
1812 static void
1813 test_fused_compare_and_write(void)
1814 {
1815 	struct spdk_nvmf_request req = {};
1816 	struct spdk_nvmf_qpair qpair = {};
1817 	struct spdk_nvme_cmd cmd = {};
1818 	union nvmf_c2h_msg rsp = {};
1819 	struct spdk_nvmf_ctrlr ctrlr = {};
1820 	struct spdk_nvmf_subsystem subsystem = {};
1821 	struct spdk_nvmf_ns ns = {};
1822 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1823 	enum spdk_nvme_ana_state ana_state[1];
1824 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1825 	struct spdk_bdev bdev = {};
1826 
1827 	struct spdk_nvmf_poll_group group = {};
1828 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1829 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1830 	struct spdk_io_channel io_ch = {};
1831 
1832 	ns.bdev = &bdev;
1833 	ns.anagrpid = 1;
1834 
1835 	subsystem.id = 0;
1836 	subsystem.max_nsid = 1;
1837 	subsys_ns[0] = &ns;
1838 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1839 
1840 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1841 
1842 	/* Enable controller */
1843 	ctrlr.vcprop.cc.bits.en = 1;
1844 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1845 	ctrlr.listener = &listener;
1846 
1847 	group.num_sgroups = 1;
1848 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1849 	sgroups.num_ns = 1;
1850 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1851 	ns_info.channel = &io_ch;
1852 	sgroups.ns_info = &ns_info;
1853 	TAILQ_INIT(&sgroups.queued);
1854 	group.sgroups = &sgroups;
1855 	TAILQ_INIT(&qpair.outstanding);
1856 
1857 	qpair.ctrlr = &ctrlr;
1858 	qpair.group = &group;
1859 	qpair.qid = 1;
1860 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1861 
1862 	cmd.nsid = 1;
1863 
1864 	req.qpair = &qpair;
1865 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1866 	req.rsp = &rsp;
1867 
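	/* In a fused compare-and-write, COMPARE must be marked FUSE_FIRST and
	 * WRITE must be marked FUSE_SECOND; the cases below cover the valid
	 * sequence, a broken sequence, and wrong opcodes in either position.
	 */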
1868 	/* SUCCESS/SUCCESS */
1869 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1870 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1871 
1872 	spdk_nvmf_request_exec(&req);
1873 	CU_ASSERT(qpair.first_fused_req != NULL);
1874 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1875 
1876 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1877 	cmd.opc = SPDK_NVME_OPC_WRITE;
1878 
1879 	spdk_nvmf_request_exec(&req);
1880 	CU_ASSERT(qpair.first_fused_req == NULL);
1881 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1882 
1883 	/* Wrong sequence */
1884 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1885 	cmd.opc = SPDK_NVME_OPC_WRITE;
1886 
1887 	spdk_nvmf_request_exec(&req);
1888 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1889 	CU_ASSERT(qpair.first_fused_req == NULL);
1890 
1891 	/* Write as FUSE_FIRST (Wrong op code) */
1892 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1893 	cmd.opc = SPDK_NVME_OPC_WRITE;
1894 
1895 	spdk_nvmf_request_exec(&req);
1896 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1897 	CU_ASSERT(qpair.first_fused_req == NULL);
1898 
1899 	/* Compare as FUSE_SECOND (Wrong op code) */
1900 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1901 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1902 
1903 	spdk_nvmf_request_exec(&req);
1904 	CU_ASSERT(qpair.first_fused_req != NULL);
1905 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1906 
1907 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1908 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1909 
1910 	spdk_nvmf_request_exec(&req);
1911 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1912 	CU_ASSERT(qpair.first_fused_req == NULL);
1913 }
1914 
1915 static void
1916 test_multi_async_event_reqs(void)
1917 {
1918 	struct spdk_nvmf_subsystem subsystem = {};
1919 	struct spdk_nvmf_qpair qpair = {};
1920 	struct spdk_nvmf_ctrlr ctrlr = {};
1921 	struct spdk_nvmf_request req[5] = {};
1922 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1923 	struct spdk_nvmf_ns ns = {};
1924 	union nvmf_h2c_msg cmd[5] = {};
1925 	union nvmf_c2h_msg rsp[5] = {};
1926 
1927 	struct spdk_nvmf_poll_group group = {};
1928 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1929 
1930 	int i;
1931 
1932 	ns_ptrs[0] = &ns;
1933 	subsystem.ns = ns_ptrs;
1934 	subsystem.max_nsid = 1;
1935 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1936 
1937 	ns.opts.nsid = 1;
1938 	group.sgroups = &sgroups;
1939 
1940 	qpair.ctrlr = &ctrlr;
1941 	qpair.group = &group;
1942 	TAILQ_INIT(&qpair.outstanding);
1943 
1944 	ctrlr.subsys = &subsystem;
1945 	ctrlr.vcprop.cc.bits.en = 1;
1946 	ctrlr.thread = spdk_get_thread();
1947 
1948 	for (i = 0; i < 5; i++) {
1949 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1950 		cmd[i].nvme_cmd.nsid = 1;
1951 		cmd[i].nvme_cmd.cid = i;
1952 
1953 		req[i].qpair = &qpair;
1954 		req[i].cmd = &cmd[i];
1955 		req[i].rsp = &rsp[i];
1956 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1957 	}
1958 
1959 	/* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */
1960 	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
1961 	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
1962 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1963 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
1964 	}
1965 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1966 
1967 	/* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */
1968 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1969 	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
1970 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1971 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
1972 
1973 	/* Test that the aer_req array stays contiguous when a request in the middle is aborted */
1974 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
1975 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1976 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1977 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
1978 
1979 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
1980 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1981 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1982 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
1983 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
1984 
1985 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
1986 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
1987 }
1988 
1989 static void
1990 test_get_ana_log_page_one_ns_per_anagrp(void)
1991 {
1992 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
1993 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
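	/* Each descriptor carries exactly one 32-bit NSID here, and the full log
	 * page holds three such descriptors.
	 */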
1994 	uint32_t ana_group[3];
1995 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
1996 	struct spdk_nvmf_ctrlr ctrlr = {};
1997 	enum spdk_nvme_ana_state ana_state[3];
1998 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1999 	struct spdk_nvmf_ns ns[3];
2000 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
2001 	uint64_t offset;
2002 	uint32_t length;
2003 	int i;
2004 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2005 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2006 	struct iovec iov, iovs[2];
2007 	struct spdk_nvme_ana_page *ana_hdr;
2008 	char _ana_desc[UT_ANA_DESC_SIZE];
2009 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2010 
2011 	subsystem.ns = ns_arr;
2012 	subsystem.max_nsid = 3;
2013 	for (i = 0; i < 3; i++) {
2014 		subsystem.ana_group[i] = 1;
2015 	}
2016 	ctrlr.subsys = &subsystem;
2017 	ctrlr.listener = &listener;
2018 
2019 	for (i = 0; i < 3; i++) {
2020 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2021 	}
2022 
2023 	for (i = 0; i < 3; i++) {
2024 		ns_arr[i]->nsid = i + 1;
2025 		ns_arr[i]->anagrpid = i + 1;
2026 	}
2027 
2028 	/* create expected page */
2029 	ana_hdr = (void *)&expected_page[0];
2030 	ana_hdr->num_ana_group_desc = 3;
2031 	ana_hdr->change_count = 0;
2032 
2033 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2034 	ana_desc = (void *)_ana_desc;
2035 	offset = sizeof(struct spdk_nvme_ana_page);
2036 
2037 	for (i = 0; i < 3; i++) {
2038 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
2039 		ana_desc->ana_group_id = ns_arr[i]->nsid;
2040 		ana_desc->num_of_nsid = 1;
2041 		ana_desc->change_count = 0;
2042 		ana_desc->ana_state = ctrlr.listener->ana_state[i];
2043 		ana_desc->nsid[0] = ns_arr[i]->nsid;
2044 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
2045 		offset += UT_ANA_DESC_SIZE;
2046 	}
2047 
2048 	/* read entire actual log page */
2049 	offset = 0;
2050 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2051 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2052 		iov.iov_base = &actual_page[offset];
2053 		iov.iov_len = length;
2054 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2055 		offset += length;
2056 	}
2057 
2058 	/* compare expected page and actual page */
2059 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2060 
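	/* Read the same page again, split across two iovecs with the boundary
	 * landing inside a descriptor, to exercise copying across iovec boundaries.
	 */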
2061 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2062 	offset = 0;
2063 	iovs[0].iov_base = &actual_page[offset];
2064 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2065 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2066 	iovs[1].iov_base = &actual_page[offset];
2067 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
2068 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2069 
2070 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2071 
2072 #undef UT_ANA_DESC_SIZE
2073 #undef UT_ANA_LOG_PAGE_SIZE
2074 }
2075 
2076 static void
2077 test_get_ana_log_page_multi_ns_per_anagrp(void)
2078 {
2079 #define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
2080 				 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
2081 				 sizeof(uint32_t) * 5)
2082 	struct spdk_nvmf_ns ns[5];
2083 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
2084 	uint32_t ana_group[5] = {0};
2085 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
2086 	enum spdk_nvme_ana_state ana_state[5];
2087 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
2088 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
2089 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2090 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2091 	struct iovec iov, iovs[2];
2092 	struct spdk_nvme_ana_page *ana_hdr;
2093 	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
2094 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2095 	uint64_t offset;
2096 	uint32_t length;
2097 	int i;
2098 
2099 	subsystem.max_nsid = 5;
2100 	subsystem.ana_group[1] = 3;
2101 	subsystem.ana_group[2] = 2;
2102 	for (i = 0; i < 5; i++) {
2103 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2104 	}
2105 
2106 	for (i = 0; i < 5; i++) {
2107 		ns_arr[i]->nsid = i + 1;
2108 	}
2109 	ns_arr[0]->anagrpid = 2;
2110 	ns_arr[1]->anagrpid = 3;
2111 	ns_arr[2]->anagrpid = 2;
2112 	ns_arr[3]->anagrpid = 3;
2113 	ns_arr[4]->anagrpid = 2;
2114 
2115 	/* create expected page */
2116 	ana_hdr = (void *)&expected_page[0];
2117 	ana_hdr->num_ana_group_desc = 2;
2118 	ana_hdr->change_count = 0;
2119 
2120 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2121 	ana_desc = (void *)_ana_desc;
2122 	offset = sizeof(struct spdk_nvme_ana_page);
2123 
2124 	memset(_ana_desc, 0, sizeof(_ana_desc));
2125 	ana_desc->ana_group_id = 2;
2126 	ana_desc->num_of_nsid = 3;
2127 	ana_desc->change_count = 0;
2128 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2129 	ana_desc->nsid[0] = 1;
2130 	ana_desc->nsid[1] = 3;
2131 	ana_desc->nsid[2] = 5;
2132 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2133 	       sizeof(uint32_t) * 3);
2134 	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;
2135 
2136 	memset(_ana_desc, 0, sizeof(_ana_desc));
2137 	ana_desc->ana_group_id = 3;
2138 	ana_desc->num_of_nsid = 2;
2139 	ana_desc->change_count = 0;
2140 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2141 	ana_desc->nsid[0] = 2;
2142 	ana_desc->nsid[1] = 4;
2143 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2144 	       sizeof(uint32_t) * 2);
2145 
2146 	/* read entire actual log page, and compare expected page and actual page. */
2147 	offset = 0;
2148 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2149 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2150 		iov.iov_base = &actual_page[offset];
2151 		iov.iov_len = length;
2152 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2153 		offset += length;
2154 	}
2155 
2156 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2157 
2158 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2159 	offset = 0;
2160 	iovs[0].iov_base = &actual_page[offset];
2161 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2162 	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2163 	iovs[1].iov_base = &actual_page[offset];
2164 	iovs[1].iov_len = sizeof(uint32_t) * 5;
2165 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2166 
2167 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2168 
2169 #undef UT_ANA_LOG_PAGE_SIZE
2170 }

2171 static void
2172 test_multi_async_events(void)
2173 {
2174 	struct spdk_nvmf_subsystem subsystem = {};
2175 	struct spdk_nvmf_qpair qpair = {};
2176 	struct spdk_nvmf_ctrlr ctrlr = {};
2177 	struct spdk_nvmf_request req[4] = {};
2178 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2179 	struct spdk_nvmf_ns ns = {};
2180 	union nvmf_h2c_msg cmd[4] = {};
2181 	union nvmf_c2h_msg rsp[4] = {};
2182 	union spdk_nvme_async_event_completion event = {};
2183 	struct spdk_nvmf_poll_group group = {};
2184 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2185 	int i;
2186 
2187 	ns_ptrs[0] = &ns;
2188 	subsystem.ns = ns_ptrs;
2189 	subsystem.max_nsid = 1;
2190 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2191 
2192 	ns.opts.nsid = 1;
2193 	group.sgroups = &sgroups;
2194 
2195 	qpair.ctrlr = &ctrlr;
2196 	qpair.group = &group;
2197 	TAILQ_INIT(&qpair.outstanding);
2198 
2199 	ctrlr.subsys = &subsystem;
2200 	ctrlr.vcprop.cc.bits.en = 1;
2201 	ctrlr.thread = spdk_get_thread();
2202 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2203 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2204 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2205 	init_pending_async_events(&ctrlr);
2206 
2207 	/* The target queues pending events when there is no outstanding AER request */
2208 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2209 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2210 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2211 
2212 	for (i = 0; i < 4; i++) {
2213 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2214 		cmd[i].nvme_cmd.nsid = 1;
2215 		cmd[i].nvme_cmd.cid = i;
2216 
2217 		req[i].qpair = &qpair;
2218 		req[i].cmd = &cmd[i];
2219 		req[i].rsp = &rsp[i];
2220 
2221 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2222 
2223 		sgroups.mgmt_io_outstanding = 1;
2224 		if (i < 3) {
2225 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2226 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2227 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2228 		} else {
2229 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2230 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2231 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2232 		}
2233 	}
2234 
2235 	event.raw = rsp[0].nvme_cpl.cdw0;
2236 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2237 	event.raw = rsp[1].nvme_cpl.cdw0;
2238 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2239 	event.raw = rsp[2].nvme_cpl.cdw0;
2240 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2241 
2242 	cleanup_pending_async_events(&ctrlr);
2243 }
2244 
2245 static void
2246 test_rae(void)
2247 {
2248 	struct spdk_nvmf_subsystem subsystem = {};
2249 	struct spdk_nvmf_qpair qpair = {};
2250 	struct spdk_nvmf_ctrlr ctrlr = {};
2251 	struct spdk_nvmf_request req[3] = {};
2252 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2253 	struct spdk_nvmf_ns ns = {};
2254 	union nvmf_h2c_msg cmd[3] = {};
2255 	union nvmf_c2h_msg rsp[3] = {};
2256 	union spdk_nvme_async_event_completion event = {};
2257 	struct spdk_nvmf_poll_group group = {};
2258 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2259 	int i;
2260 	char data[4096];
2261 
2262 	ns_ptrs[0] = &ns;
2263 	subsystem.ns = ns_ptrs;
2264 	subsystem.max_nsid = 1;
2265 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2266 
2267 	ns.opts.nsid = 1;
2268 	group.sgroups = &sgroups;
2269 
2270 	qpair.ctrlr = &ctrlr;
2271 	qpair.group = &group;
2272 	TAILQ_INIT(&qpair.outstanding);
2273 
2274 	ctrlr.subsys = &subsystem;
2275 	ctrlr.vcprop.cc.bits.en = 1;
2276 	ctrlr.thread = spdk_get_thread();
2277 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2278 	init_pending_async_events(&ctrlr);
2279 
2280 	/* The target queues pending events when there is no outstanding AER request */
2281 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2282 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2283 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2284 	/* only one event is queued until RAE is cleared */
2285 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2286 
2287 	req[0].qpair = &qpair;
2288 	req[0].cmd = &cmd[0];
2289 	req[0].rsp = &rsp[0];
2290 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2291 	cmd[0].nvme_cmd.nsid = 1;
2292 	cmd[0].nvme_cmd.cid = 0;
2293 
2294 	for (i = 1; i < 3; i++) {
2295 		req[i].qpair = &qpair;
2296 		req[i].cmd = &cmd[i];
2297 		req[i].rsp = &rsp[i];
2298 		req[i].data = &data;
2299 		req[i].length = sizeof(data);
2300 		spdk_iov_one(req[i].iov, &req[i].iovcnt, &data, req[i].length);
2301 
2302 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2303 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2304 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2305 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2306 			spdk_nvme_bytes_to_numd(req[i].length);
2307 		cmd[i].nvme_cmd.cid = i;
2308 	}
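	/* req[1] reads the log with RAE set (the event state is retained); req[2]
	 * reads it with RAE clear, which re-arms the notice.
	 */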
2309 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2310 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
2311 
2312 	/* consume the pending event */
2313 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2314 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2315 	event.raw = rsp[0].nvme_cpl.cdw0;
2316 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2317 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2318 
2319 	/* get log with RAE set */
2320 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2321 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2322 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2323 
2324 	/* will not generate a new event until RAE is cleared */
2325 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2326 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2327 
2328 	/* get log with RAE clear */
2329 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2330 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2331 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2332 
2333 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2334 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2335 
2336 	cleanup_pending_async_events(&ctrlr);
2337 }
2338 
2339 static void
2340 test_nvmf_ctrlr_create_destruct(void)
2341 {
2342 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2343 	struct spdk_nvmf_poll_group group = {};
2344 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2345 	struct spdk_nvmf_transport transport = {};
2346 	struct spdk_nvmf_transport_ops tops = {};
2347 	struct spdk_nvmf_subsystem subsystem = {};
2348 	struct spdk_nvmf_request req = {};
2349 	struct spdk_nvmf_qpair qpair = {};
2350 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2351 	struct spdk_nvmf_tgt tgt = {};
2352 	union nvmf_h2c_msg cmd = {};
2353 	union nvmf_c2h_msg rsp = {};
2354 	const uint8_t hostid[16] = {
2355 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2356 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2357 	};
2358 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2359 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2360 
2361 	group.thread = spdk_get_thread();
2362 	transport.ops = &tops;
2363 	transport.opts.max_aq_depth = 32;
2364 	transport.opts.max_queue_depth = 64;
2365 	transport.opts.max_qpairs_per_ctrlr = 3;
2366 	transport.opts.dif_insert_or_strip = true;
2367 	transport.tgt = &tgt;
2368 	qpair.transport = &transport;
2369 	qpair.group = &group;
2370 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2371 	TAILQ_INIT(&qpair.outstanding);
2372 
2373 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2374 	connect_data.cntlid = 0xFFFF;
2375 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2376 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2377 
2378 	subsystem.thread = spdk_get_thread();
2379 	subsystem.id = 1;
2380 	TAILQ_INIT(&subsystem.ctrlrs);
2381 	subsystem.tgt = &tgt;
2382 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2383 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2384 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2385 
2386 	group.sgroups = sgroups;
2387 
2388 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2389 	cmd.connect_cmd.cid = 1;
2390 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2391 	cmd.connect_cmd.recfmt = 0;
2392 	cmd.connect_cmd.qid = 0;
2393 	cmd.connect_cmd.sqsize = 31;
2394 	cmd.connect_cmd.cattr = 0;
2395 	cmd.connect_cmd.kato = 120000;
2396 
2397 	req.qpair = &qpair;
2398 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2399 	req.data = &connect_data;
2400 	req.length = sizeof(connect_data);
2401 	spdk_iov_one(req.iov, &req.iovcnt, &connect_data, req.length);
2402 	req.cmd = &cmd;
2403 	req.rsp = &rsp;
2404 
2405 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2406 	sgroups[subsystem.id].mgmt_io_outstanding++;
2407 
2408 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base);
2409 	poll_threads();
2410 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2411 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2412 	CU_ASSERT(ctrlr->subsys == &subsystem);
2413 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2414 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2415 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2416 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2417 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2418 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2419 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2420 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
2421 	CU_ASSERT(!memcmp(&ctrlr->hostid, hostid, 16));
2422 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
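	/* MQES is zero-based, so max_queue_depth of 64 is reported as 63. */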
2423 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2424 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2425 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
2426 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2427 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2428 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2429 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2430 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2431 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2432 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2433 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2434 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2435 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2436 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2437 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2438 
2439 	ctrlr->in_destruct = true;
2440 	nvmf_ctrlr_destruct(ctrlr);
2441 	poll_threads();
2442 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2443 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2444 }
2445 
2446 static void
2447 test_nvmf_ctrlr_use_zcopy(void)
2448 {
2449 	struct spdk_nvmf_subsystem subsystem = {};
2450 	struct spdk_nvmf_transport transport = {};
2451 	struct spdk_nvmf_request req = {};
2452 	struct spdk_nvmf_qpair qpair = {};
2453 	struct spdk_nvmf_ctrlr ctrlr = {};
2454 	union nvmf_h2c_msg cmd = {};
2455 	struct spdk_nvmf_ns ns = {};
2456 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2457 	struct spdk_bdev bdev = {};
2458 	struct spdk_nvmf_poll_group group = {};
2459 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2460 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2461 	struct spdk_io_channel io_ch = {};
2462 	int opc;
2463 
2464 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2465 	ns.bdev = &bdev;
2466 
2467 	subsystem.id = 0;
2468 	subsystem.max_nsid = 1;
2469 	subsys_ns[0] = &ns;
2470 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2471 
2472 	ctrlr.subsys = &subsystem;
2473 
2474 	transport.opts.zcopy = true;
2475 
2476 	qpair.ctrlr = &ctrlr;
2477 	qpair.group = &group;
2478 	qpair.qid = 1;
2479 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2480 	qpair.transport = &transport;
2481 
2482 	group.thread = spdk_get_thread();
2483 	group.num_sgroups = 1;
2484 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2485 	sgroups.num_ns = 1;
2486 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2487 	ns_info.channel = &io_ch;
2488 	sgroups.ns_info = &ns_info;
2489 	TAILQ_INIT(&sgroups.queued);
2490 	group.sgroups = &sgroups;
2491 	TAILQ_INIT(&qpair.outstanding);
2492 
2493 	req.qpair = &qpair;
2494 	req.cmd = &cmd;
2495 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2496 
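	/* Zcopy is only used for READ/WRITE on an I/O queue against a zcopy-capable
	 * namespace, and only when the transport enables it; the cases below break
	 * these conditions one at a time before the final success.
	 */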
2497 	/* Admin queue */
2498 	qpair.qid = 0;
2499 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2500 	qpair.qid = 1;
2501 
2502 	/* Invalid Opcodes */
2503 	for (opc = 0; opc <= 255; opc++) {
2504 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2505 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2506 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2507 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2508 		}
2509 	}
2510 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2511 
2512 	/* Fused WRITE */
2513 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2514 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2515 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2516 
2517 	/* No namespace/bdev for this NSID */
2518 	cmd.nvme_cmd.nsid = 4;
2519 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2520 	cmd.nvme_cmd.nsid = 1;
2521 
2522 	/* ZCOPY Not supported */
2523 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2524 	ns.zcopy = true;
2525 
2526 	/* ZCOPY disabled on transport level */
2527 	transport.opts.zcopy = false;
2528 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2529 	transport.opts.zcopy = true;
2530 
2531 	/* Success */
2532 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2533 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2534 }
2535 
2536 static void
2537 qpair_state_change_done(void *cb_arg, int status)
2538 {
2539 }
2540 
2541 static void
2542 test_spdk_nvmf_request_zcopy_start(void)
2543 {
2544 	struct spdk_nvmf_request req = {};
2545 	struct spdk_nvmf_qpair qpair = {};
2546 	struct spdk_nvmf_transport transport = {};
2547 	struct spdk_nvme_cmd cmd = {};
2548 	union nvmf_c2h_msg rsp = {};
2549 	struct spdk_nvmf_ctrlr ctrlr = {};
2550 	struct spdk_nvmf_subsystem subsystem = {};
2551 	struct spdk_nvmf_ns ns = {};
2552 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2553 	enum spdk_nvme_ana_state ana_state[1];
2554 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2555 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2556 
2557 	struct spdk_nvmf_poll_group group = {};
2558 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2559 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2560 	struct spdk_io_channel io_ch = {};
2561 
2562 	ns.bdev = &bdev;
2563 	ns.zcopy = true;
2564 	ns.anagrpid = 1;
2565 
2566 	subsystem.id = 0;
2567 	subsystem.max_nsid = 1;
2568 	subsys_ns[0] = &ns;
2569 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2570 
2571 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2572 
2573 	/* Enable controller */
2574 	ctrlr.vcprop.cc.bits.en = 1;
2575 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2576 	ctrlr.listener = &listener;
2577 
2578 	transport.opts.zcopy = true;
2579 
2580 	group.thread = spdk_get_thread();
2581 	group.num_sgroups = 1;
2582 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2583 	sgroups.num_ns = 1;
2584 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2585 	ns_info.channel = &io_ch;
2586 	sgroups.ns_info = &ns_info;
2587 	TAILQ_INIT(&sgroups.queued);
2588 	group.sgroups = &sgroups;
2589 	TAILQ_INIT(&qpair.outstanding);
2590 
2591 	qpair.ctrlr = &ctrlr;
2592 	qpair.group = &group;
2593 	qpair.transport = &transport;
2594 	qpair.qid = 1;
2595 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2596 
2597 	cmd.nsid = 1;
2598 
2599 	req.qpair = &qpair;
2600 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2601 	req.rsp = &rsp;
2602 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2603 	cmd.opc = SPDK_NVME_OPC_READ;
2604 
2605 	/* Fail because no controller */
2606 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2607 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2608 	qpair.ctrlr = NULL;
2609 	spdk_nvmf_request_zcopy_start(&req);
2610 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2611 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2612 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
2613 	qpair.ctrlr = &ctrlr;
2614 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2615 
2616 	/* Fail because bad NSID */
2617 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2618 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2619 	cmd.nsid = 0;
2620 	spdk_nvmf_request_zcopy_start(&req);
2621 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2622 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2623 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2624 	cmd.nsid = 1;
2625 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2626 
2627 	/* Fail because bad Channel */
2628 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2629 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2630 	ns_info.channel = NULL;
2631 	spdk_nvmf_request_zcopy_start(&req);
2632 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2633 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2634 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2635 	ns_info.channel = &io_ch;
2636 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2637 
2638 	/* Queue the request because the NSID is not active */
2639 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2640 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2641 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2642 	spdk_nvmf_request_zcopy_start(&req);
2643 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
2644 	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
2645 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2646 	TAILQ_REMOVE(&sgroups.queued, &req, link);
2647 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2648 
2649 	/* Fail because QPair is not active */
2650 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2651 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2652 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2653 	qpair.state_cb = qpair_state_change_done;
2654 	spdk_nvmf_request_zcopy_start(&req);
2655 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
2656 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2657 	qpair.state_cb = NULL;
2658 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2659 
2660 	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails: the LBA range starts at blockcnt, past the end of the bdev */
2661 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2662 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2663 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2664 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2665 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
2666 	spdk_nvmf_request_zcopy_start(&req);
2667 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2668 	cmd.cdw10 = 0;
2669 	cmd.cdw12 = 0;
2670 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2671 
2672 	/* Success */
2673 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2674 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2675 	spdk_nvmf_request_zcopy_start(&req);
2676 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2677 }
2678 
2679 static void
2680 test_zcopy_read(void)
2681 {
2682 	struct spdk_nvmf_request req = {};
2683 	struct spdk_nvmf_qpair qpair = {};
2684 	struct spdk_nvmf_transport transport = {};
2685 	struct spdk_nvme_cmd cmd = {};
2686 	union nvmf_c2h_msg rsp = {};
2687 	struct spdk_nvmf_ctrlr ctrlr = {};
2688 	struct spdk_nvmf_subsystem subsystem = {};
2689 	struct spdk_nvmf_ns ns = {};
2690 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2691 	enum spdk_nvme_ana_state ana_state[1];
2692 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2693 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2694 
2695 	struct spdk_nvmf_poll_group group = {};
2696 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2697 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2698 	struct spdk_io_channel io_ch = {};
2699 
2700 	ns.bdev = &bdev;
2701 	ns.zcopy = true;
2702 	ns.anagrpid = 1;
2703 
2704 	subsystem.id = 0;
2705 	subsystem.max_nsid = 1;
2706 	subsys_ns[0] = &ns;
2707 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2708 
2709 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2710 
2711 	/* Enable controller */
2712 	ctrlr.vcprop.cc.bits.en = 1;
2713 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2714 	ctrlr.listener = &listener;
2715 
2716 	transport.opts.zcopy = true;
2717 
2718 	group.thread = spdk_get_thread();
2719 	group.num_sgroups = 1;
2720 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2721 	sgroups.num_ns = 1;
2722 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2723 	ns_info.channel = &io_ch;
2724 	sgroups.ns_info = &ns_info;
2725 	TAILQ_INIT(&sgroups.queued);
2726 	group.sgroups = &sgroups;
2727 	TAILQ_INIT(&qpair.outstanding);
2728 
2729 	qpair.ctrlr = &ctrlr;
2730 	qpair.group = &group;
2731 	qpair.transport = &transport;
2732 	qpair.qid = 1;
2733 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2734 
2735 	cmd.nsid = 1;
2736 
2737 	req.qpair = &qpair;
2738 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2739 	req.rsp = &rsp;
2740 	cmd.opc = SPDK_NVME_OPC_READ;
2741 
2742 	/* Prepare for zcopy */
2743 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2744 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2745 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2746 	CU_ASSERT(ns_info.io_outstanding == 0);
2747 
2748 	/* Perform the zcopy start */
2749 	spdk_nvmf_request_zcopy_start(&req);
2750 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2751 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2752 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2753 	CU_ASSERT(ns_info.io_outstanding == 1);
2754 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2755 
2756 	/* Perform the zcopy end */
2757 	spdk_nvmf_request_zcopy_end(&req, false);
2758 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2759 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2760 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2761 	CU_ASSERT(ns_info.io_outstanding == 0);
2762 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2763 }
2764 
2765 static void
2766 test_zcopy_write(void)
2767 {
2768 	struct spdk_nvmf_request req = {};
2769 	struct spdk_nvmf_qpair qpair = {};
2770 	struct spdk_nvmf_transport transport = {};
2771 	struct spdk_nvme_cmd cmd = {};
2772 	union nvmf_c2h_msg rsp = {};
2773 	struct spdk_nvmf_ctrlr ctrlr = {};
2774 	struct spdk_nvmf_subsystem subsystem = {};
2775 	struct spdk_nvmf_ns ns = {};
2776 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2777 	enum spdk_nvme_ana_state ana_state[1];
2778 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2779 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2780 
2781 	struct spdk_nvmf_poll_group group = {};
2782 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2783 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2784 	struct spdk_io_channel io_ch = {};
2785 
2786 	ns.bdev = &bdev;
2787 	ns.zcopy = true;
2788 	ns.anagrpid = 1;
2789 
2790 	subsystem.id = 0;
2791 	subsystem.max_nsid = 1;
2792 	subsys_ns[0] = &ns;
2793 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2794 
2795 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2796 
2797 	/* Enable controller */
2798 	ctrlr.vcprop.cc.bits.en = 1;
2799 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2800 	ctrlr.listener = &listener;
2801 
2802 	transport.opts.zcopy = true;
2803 
2804 	group.thread = spdk_get_thread();
2805 	group.num_sgroups = 1;
2806 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2807 	sgroups.num_ns = 1;
2808 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2809 	ns_info.channel = &io_ch;
2810 	sgroups.ns_info = &ns_info;
2811 	TAILQ_INIT(&sgroups.queued);
2812 	group.sgroups = &sgroups;
2813 	TAILQ_INIT(&qpair.outstanding);
2814 
2815 	qpair.ctrlr = &ctrlr;
2816 	qpair.group = &group;
2817 	qpair.transport = &transport;
2818 	qpair.qid = 1;
2819 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2820 
2821 	cmd.nsid = 1;
2822 
2823 	req.qpair = &qpair;
2824 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2825 	req.rsp = &rsp;
2826 	cmd.opc = SPDK_NVME_OPC_WRITE;
2827 
2828 	/* Prepare for zcopy */
2829 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2830 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2831 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2832 	CU_ASSERT(ns_info.io_outstanding == 0);
2833 
2834 	/* Perform the zcopy start */
2835 	spdk_nvmf_request_zcopy_start(&req);
2836 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2837 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2838 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2839 	CU_ASSERT(ns_info.io_outstanding == 1);
2840 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2841 
2842 	/* Perform the zcopy end */
2843 	spdk_nvmf_request_zcopy_end(&req, true);
2844 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2845 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2846 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2847 	CU_ASSERT(ns_info.io_outstanding == 0);
2848 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2849 }
2850 
2851 static void
2852 test_nvmf_property_set(void)
2853 {
2854 	int rc;
2855 	struct spdk_nvmf_request req = {};
2856 	struct spdk_nvmf_qpair qpair = {};
2857 	struct spdk_nvmf_ctrlr ctrlr = {};
2858 	union nvmf_h2c_msg cmd = {};
2859 	union nvmf_c2h_msg rsp = {};
2860 
2861 	req.qpair = &qpair;
2862 	qpair.ctrlr = &ctrlr;
2863 	req.cmd = &cmd;
2864 	req.rsp = &rsp;
2865 
2866 	/* Invalid parameters */
2867 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2868 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
2869 
2870 	rc = nvmf_property_set(&req);
2871 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2872 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2873 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2874 
2875 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);
2876 
2877 	rc = nvmf_property_get(&req);
2878 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2879 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2880 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2881 
2882 	/* Set cc, a 4-byte register, with a matching 4-byte property size */
2883 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2884 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);
2885 
2886 	rc = nvmf_property_set(&req);
2887 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2888 
2889 	/* Emulate cc data */
2890 	ctrlr.vcprop.cc.raw = 0xDEADBEEF;
2891 
2892 	rc = nvmf_property_get(&req);
2893 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2894 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);
2895 
2896 	/* Set asq, an 8-byte register, with a smaller 4-byte property size */
2897 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2898 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2899 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);
2900 
2901 	rc = nvmf_property_set(&req);
2902 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2903 
2904 	/* Emulate asq data */
2905 	ctrlr.vcprop.asq = 0xAADDADBEEF;
2906 
2907 	rc = nvmf_property_get(&req);
2908 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
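	/* A 4-byte property get of the 8-byte asq register returns only the lower 32 bits. */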
2909 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
2910 }
2911 
2912 static void
2913 test_nvmf_ctrlr_get_features_host_behavior_support(void)
2914 {
2915 	int rc;
2916 	struct spdk_nvmf_request req = {};
2917 	struct spdk_nvmf_qpair qpair = {};
2918 	struct spdk_nvmf_ctrlr ctrlr = {};
2919 	struct spdk_nvme_host_behavior behavior = {};
2920 	union nvmf_h2c_msg cmd = {};
2921 	union nvmf_c2h_msg rsp = {};
2922 
2923 	qpair.ctrlr = &ctrlr;
2924 	req.qpair = &qpair;
2925 	req.cmd = &cmd;
2926 	req.rsp = &rsp;
2927 
2928 	/* Invalid data */
2929 	req.data = NULL;
2930 	req.length = sizeof(struct spdk_nvme_host_behavior);
2931 	req.iovcnt = 0;
2932 
2933 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2934 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2935 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2936 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2937 	CU_ASSERT(req.data == NULL);
2938 
2939 	/* Wrong structure length */
2940 	req.data = &behavior;
2941 	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;
2942 	spdk_iov_one(req.iov, &req.iovcnt, &behavior, req.length);
2943 
2944 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2945 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2946 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2947 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2948 
2949 	/* Get Features Host Behavior Support Success */
2950 	req.data = &behavior;
2951 	req.length = sizeof(struct spdk_nvme_host_behavior);
2952 	spdk_iov_one(req.iov, &req.iovcnt, &behavior, req.length);
2953 
2954 	ctrlr.acre_enabled = true;
2955 	behavior.acre = false;
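	/* The controller's ACRE state should be copied into the host buffer */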
2956 
2957 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2958 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2959 	CU_ASSERT(behavior.acre == true);
2960 }
2961 
2962 static void
2963 test_nvmf_ctrlr_set_features_host_behavior_support(void)
2964 {
2965 	int rc;
2966 	struct spdk_nvmf_request req = {};
2967 	struct spdk_nvmf_qpair qpair = {};
2968 	struct spdk_nvmf_ctrlr ctrlr = {};
2969 	struct spdk_nvme_host_behavior host_behavior = {};
2970 	union nvmf_h2c_msg cmd = {};
2971 	union nvmf_c2h_msg rsp = {};
2972 
2973 	qpair.ctrlr = &ctrlr;
2974 	req.qpair = &qpair;
2975 	req.cmd = &cmd;
2976 	req.rsp = &rsp;
2977 	req.iov[0].iov_base = &host_behavior;
2978 	req.iov[0].iov_len = sizeof(host_behavior);
2979 
2980 	/* Invalid iovcnt */
2981 	req.iovcnt = 0;
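	/* Seed rc and the completion status so the handler's updates are observable */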
2982 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
2983 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2984 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
2985 
2986 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
2987 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2988 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2989 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2990 
2991 	/* Invalid iov_len */
2992 	req.iovcnt = 1;
2993 	req.iov[0].iov_len = 0;
2994 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
2995 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2996 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
2997 
2998 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
2999 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3000 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3001 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3002 
3003 	/* acre is false */
3004 	host_behavior.acre = 0;
3005 	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
3006 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3007 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3008 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3009 
3010 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3011 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3012 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3013 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
3014 	CU_ASSERT(ctrlr.acre_enabled == false);
3015 
3016 	/* acre is true */
3017 	host_behavior.acre = 1;
3018 	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
3019 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3020 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3021 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3022 
3023 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3024 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3025 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3026 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
3027 	CU_ASSERT(ctrlr.acre_enabled == true);
3028 
3029 	/* Invalid acre: values other than 0 and 1 are rejected */
3030 	host_behavior.acre = 2;
3031 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3032 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3033 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3034 
3035 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3036 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3037 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3038 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3039 }
3040 
3041 int
3042 main(int argc, char **argv)
3043 {
3044 	CU_pSuite	suite = NULL;
3045 	unsigned int	num_failures;
3046 
3047 	CU_set_error_action(CUEA_ABORT);
3048 	CU_initialize_registry();
3049 
3050 	suite = CU_add_suite("nvmf", NULL, NULL);
3051 	CU_ADD_TEST(suite, test_get_log_page);
3052 	CU_ADD_TEST(suite, test_process_fabrics_cmd);
3053 	CU_ADD_TEST(suite, test_connect);
3054 	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
3055 	CU_ADD_TEST(suite, test_identify_ns);
3056 	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
3057 	CU_ADD_TEST(suite, test_reservation_write_exclusive);
3058 	CU_ADD_TEST(suite, test_reservation_exclusive_access);
3059 	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
3060 	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
3061 	CU_ADD_TEST(suite, test_reservation_notification_log_page);
3062 	CU_ADD_TEST(suite, test_get_dif_ctx);
3063 	CU_ADD_TEST(suite, test_set_get_features);
3064 	CU_ADD_TEST(suite, test_identify_ctrlr);
3065 	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
3066 	CU_ADD_TEST(suite, test_custom_admin_cmd);
3067 	CU_ADD_TEST(suite, test_fused_compare_and_write);
3068 	CU_ADD_TEST(suite, test_multi_async_event_reqs);
3069 	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
3070 	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
3071 	CU_ADD_TEST(suite, test_multi_async_events);
3072 	CU_ADD_TEST(suite, test_rae);
3073 	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
3074 	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
3075 	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
3076 	CU_ADD_TEST(suite, test_zcopy_read);
3077 	CU_ADD_TEST(suite, test_zcopy_write);
3078 	CU_ADD_TEST(suite, test_nvmf_property_set);
3079 	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
3080 	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);
3081 
3082 	allocate_threads(1);
3083 	set_thread(0);
3084 
3085 	CU_basic_set_mode(CU_BRM_VERBOSE);
3086 	CU_basic_run_tests();
3087 	num_failures = CU_get_number_of_failures();
3088 	CU_cleanup_registry();
3089 
3090 	free_threads();
3091 
3092 	return num_failures;
3093 }
3094