/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/bdev_zone.h"
#include "spdk/nvme_spec.h"
#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

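/* The multithread UT helpers and the file under test are pulled in as sources
 * (not linked) so that this test can call ctrlr.c's static functions directly;
 * the DEFINE_STUB()s further down satisfy its remaining external dependencies. */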
#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

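/* Minimal stand-in for struct spdk_bdev: the bdev library is not linked into
 * this test, so only the fields that the code under test actually reads are
 * provided here. */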
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
	bool zoned;
	uint32_t zone_size;
	uint32_t max_open_zones;
	uint32_t max_active_zones;
};

#define MAX_OPEN_ZONES 12
#define MAX_ACTIVE_ZONES 34
#define ZONE_SIZE 56

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

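/* Sentinel pointer values only: these are never dereferenced, they just let
 * the tests check which path nvmf_bdev_ctrlr_zcopy_start() took. */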
static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

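/*
 * DEFINE_STUB(fn, ret, args, val) and DEFINE_STUB_V(fn, args) come from
 * spdk_internal/mock.h: they emit a mock implementation of fn that returns
 * val until a test overrides it, e.g.:
 *
 *     MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
 *     ...
 *     MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
 */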
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
	    MAX_ACTIVE_ZONES);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

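/* The stubs below need behavior derived from their arguments, so they are
 * written out by hand instead of via DEFINE_STUB. */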
int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

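/* Zcopy-start stub: decodes SLBA/NLB the same way the real read/write path
 * does, range checks against the bdev, and hands back one of the sentinel
 * pointers defined above so tests can tell which opcode was dispatched. */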
int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);
	spdk_iov_one(req.iov, &req.iovcnt, &data, req.length);

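	/* Get Log Page transfer lengths are given as a 0-based dword count split
	 * across NUMDL/NUMDU; spdk_nvme_bytes_to_numd() performs that conversion. */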
	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	req.iovcnt = 0;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* A fabrics command other than Connect on a qpair with no controller
	 * must fail with a command sequence error. */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

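/*
 * CONNECT failure responses carry an "invalid parameter" payload: IATTR 0
 * means IPO is a byte offset into the SQE (e.g. 42 = QID, 44 = SQSIZE),
 * while IATTR 1 means IPO points into the CONNECT data (e.g. 16 = CNTLID,
 * 256 = SUBNQN, 512 = HOSTNQN). The assertions below rely on this encoding.
 */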
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.length = sizeof(connect_data);
	spdk_iov_one(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* Admin queue connect (qid 0) to discovery controller with
	 * keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Admin queue connect (qid 0) to discovery controller with
	 * keep-alive-timeout == 0. In that case a fixed default timeout is
	 * applied, so the keep-alive poller must still be running. */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

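/* Each Namespace Identification Descriptor is a 4-byte header (NIDT, NIDL and
 * two reserved bytes) followed by NIDL bytes of ID, so e.g. an EUI64 entry
 * occupies bytes 0-11 of the buffer and its payload starts at byte 4. */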
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);
	spdk_iov_one(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

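/* Identify Namespace: an allocated but inactive NSID (a NULL entry in ns_arr)
 * is expected to complete successfully with zero-filled data, while NSIDs
 * outside the subsystem's range fail with Invalid Namespace or Format. */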
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

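/* ZNS I/O command set identify data: MAR and MOR are 0-based values, hence
 * the MAX_*_ZONES - 1 assertions below, and the zone size is reported in the
 * zsze field of lbafe[0]. */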
static void
test_identify_ns_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ns_data nsdata = {};
	struct spdk_bdev bdev[2] = {
		{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE,
		 .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES},
		{.blockcnt = 5678}
	};
	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* Invalid ZNS NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid ZNS NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1);
	CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1);
	CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1);
	CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE);
	nsdata.ozcs.read_across_zone_boundaries = 0;
	nsdata.mar = 0;
	nsdata.mor = 0;
	nsdata.lbafe[0].zsze = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* Valid NVM NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NVM NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

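/* For the Temperature Threshold feature (cdw11), TMPSEL occupies bits 19:16
 * and THSEL bits 21:20; several cases below deliberately set encodings the
 * NVMe spec reserves (e.g. TMPSEL 9h, THSEL 3h) and expect Invalid Field. */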
static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set (rejected as Invalid
	 * Field, since enabling DULBE is not supported) */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has one controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has one controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

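/* Write Exclusive: only the reservation holder (Host A here) may issue
 * media-modifying commands; any host, registered or not, may still read. */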
static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

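/* Exclusive Access: non-holders are denied reads as well as writes, but
 * reservation management commands from registrants are still allowed. */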
1352 static void
1353 test_reservation_exclusive_access(void)
1354 {
1355 	struct spdk_nvmf_request req = {};
1356 	union nvmf_h2c_msg cmd = {};
1357 	union nvmf_c2h_msg rsp = {};
1358 	int rc;
1359 
1360 	req.cmd = &cmd;
1361 	req.rsp = &rsp;
1362 
1363 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1364 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1365 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1366 
1367 	/* Test Case: Issue a Read command from Host B */
1368 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1369 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1370 	SPDK_CU_ASSERT_FATAL(rc < 0);
1371 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1372 
1373 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1374 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1375 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1376 	SPDK_CU_ASSERT_FATAL(rc == 0);
1377 }
1378 
1379 static void
1380 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1381 {
1382 	struct spdk_nvmf_request req = {};
1383 	union nvmf_h2c_msg cmd = {};
1384 	union nvmf_c2h_msg rsp = {};
1385 	int rc;
1386 
1387 	req.cmd = &cmd;
1388 	req.rsp = &rsp;
1389 
1390 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1391 	ut_reservation_init(rtype);
1392 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1393 
1394 	/* Test Case: Issue a Read command from Host A and Host C */
1395 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1396 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1397 	SPDK_CU_ASSERT_FATAL(rc == 0);
1398 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1399 	SPDK_CU_ASSERT_FATAL(rc == 0);
1400 
1401 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1402 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1403 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1404 	SPDK_CU_ASSERT_FATAL(rc == 0);
1405 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1406 	SPDK_CU_ASSERT_FATAL(rc == 0);
1407 
1408 	/* Unregister Host C */
1409 	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
1410 
1411 	/* Test Case: Read and Write commands from non-registrant Host C */
1412 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1413 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1414 	SPDK_CU_ASSERT_FATAL(rc == 0);
1415 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1416 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1417 	SPDK_CU_ASSERT_FATAL(rc < 0);
1418 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1419 }
1420 
1421 static void
1422 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1423 {
1424 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1425 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1426 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1427 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1428 }
1429 
1430 static void
1431 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1432 {
1433 	struct spdk_nvmf_request req = {};
1434 	union nvmf_h2c_msg cmd = {};
1435 	union nvmf_c2h_msg rsp = {};
1436 	int rc;
1437 
1438 	req.cmd = &cmd;
1439 	req.rsp = &rsp;
1440 
1441 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1442 	ut_reservation_init(rtype);
1443 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1444 
1445 	/* Test Case: Issue a Write command from Host B */
1446 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1447 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1448 	SPDK_CU_ASSERT_FATAL(rc == 0);
1449 
1450 	/* Unregister Host B */
1451 	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));
1452 
	/* Test Case: Read and Write commands from non-registrant Host B */
1454 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1455 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1456 	SPDK_CU_ASSERT_FATAL(rc < 0);
1457 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1458 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1459 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1460 	SPDK_CU_ASSERT_FATAL(rc < 0);
1461 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1462 }
1463 
1464 static void
1465 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1466 {
1467 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1468 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1469 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1470 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1471 }
1472 
1473 static void
1474 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1475 {
1476 	STAILQ_INIT(&ctrlr->async_events);
1477 }
1478 
1479 static void
1480 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1481 {
1482 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1483 
1484 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1485 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1486 		free(event);
1487 	}
1488 }
1489 
1490 static int
1491 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1492 {
1493 	int num = 0;
1494 	struct spdk_nvmf_async_event_completion *event;
1495 
1496 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1497 		num++;
1498 	}
1499 	return num;
1500 }
1501 
1502 static void
1503 test_reservation_notification_log_page(void)
1504 {
1505 	struct spdk_nvmf_ctrlr ctrlr;
1506 	struct spdk_nvmf_qpair qpair;
1507 	struct spdk_nvmf_ns ns;
1508 	struct spdk_nvmf_request req = {};
1509 	union nvmf_h2c_msg cmd = {};
1510 	union nvmf_c2h_msg rsp = {};
1511 	union spdk_nvme_async_event_completion event = {};
1512 	struct spdk_nvme_reservation_notification_log logs[3];
1513 	struct iovec iov;
1514 
1515 	memset(&ctrlr, 0, sizeof(ctrlr));
1516 	ctrlr.thread = spdk_get_thread();
1517 	TAILQ_INIT(&ctrlr.log_head);
1518 	init_pending_async_events(&ctrlr);
1519 	ns.nsid = 1;
1520 
1521 	/* Test Case: Mask all the reservation notifications */
1522 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1523 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1524 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1525 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1526 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1527 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1528 					  SPDK_NVME_RESERVATION_RELEASED);
1529 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1530 					  SPDK_NVME_RESERVATION_PREEMPTED);
1531 	poll_threads();
1532 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1533 
	/* Test Case: Unmask all the reservation notifications;
	 * 3 log pages are generated and the AER is triggered.
	 */
1537 	ns.mask = 0;
1538 	ctrlr.num_avail_log_pages = 0;
1539 	req.cmd = &cmd;
1540 	req.rsp = &rsp;
1541 	ctrlr.aer_req[0] = &req;
1542 	ctrlr.nr_aer_reqs = 1;
1543 	req.qpair = &qpair;
1544 	TAILQ_INIT(&qpair.outstanding);
1545 	qpair.ctrlr = NULL;
1546 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1547 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
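
	/* With a single outstanding AER request, the first unmasked notice
	 * completes it; the remaining notices only accumulate log pages.
	 */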
1548 
1549 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1550 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1551 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1552 					  SPDK_NVME_RESERVATION_RELEASED);
1553 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1554 					  SPDK_NVME_RESERVATION_PREEMPTED);
1555 	poll_threads();
1556 	event.raw = rsp.nvme_cpl.cdw0;
1557 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1558 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1559 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1560 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1561 
1562 	/* Test Case: Get Log Page to clear the log pages */
1563 	iov.iov_base = &logs[0];
1564 	iov.iov_len = sizeof(logs);
1565 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1566 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1567 
1568 	cleanup_pending_async_events(&ctrlr);
1569 }
1570 
1571 static void
1572 test_get_dif_ctx(void)
1573 {
1574 	struct spdk_nvmf_subsystem subsystem = {};
1575 	struct spdk_nvmf_request req = {};
1576 	struct spdk_nvmf_qpair qpair = {};
1577 	struct spdk_nvmf_ctrlr ctrlr = {};
1578 	struct spdk_nvmf_ns ns = {};
1579 	struct spdk_nvmf_ns *_ns = NULL;
1580 	struct spdk_bdev bdev = {};
1581 	union nvmf_h2c_msg cmd = {};
1582 	struct spdk_dif_ctx dif_ctx = {};
1583 	bool ret;
1584 
1585 	ctrlr.subsys = &subsystem;
1586 
1587 	qpair.ctrlr = &ctrlr;
1588 
1589 	req.qpair = &qpair;
1590 	req.cmd = &cmd;
1591 
1592 	ns.bdev = &bdev;
1593 
1594 	ctrlr.dif_insert_or_strip = false;
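
	/* Each pair of calls below flips one gating condition; spdk_nvmf_request_get_dif_ctx()
	 * only succeeds once every check passes: DIF enabled, initialized qpair, non-fabric
	 * NVM opcode on an I/O queue, and a valid NSID backed by a namespace.
	 */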
1595 
1596 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1597 	CU_ASSERT(ret == false);
1598 
1599 	ctrlr.dif_insert_or_strip = true;
1600 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1601 
1602 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1603 	CU_ASSERT(ret == false);
1604 
1605 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1606 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1607 
1608 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1609 	CU_ASSERT(ret == false);
1610 
1611 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1612 
1613 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1614 	CU_ASSERT(ret == false);
1615 
1616 	qpair.qid = 1;
1617 
1618 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1619 	CU_ASSERT(ret == false);
1620 
1621 	cmd.nvme_cmd.nsid = 1;
1622 
1623 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1624 	CU_ASSERT(ret == false);
1625 
1626 	subsystem.max_nsid = 1;
1627 	subsystem.ns = &_ns;
1628 	subsystem.ns[0] = &ns;
1629 
1630 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1631 	CU_ASSERT(ret == false);
1632 
1633 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1634 
1635 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1636 	CU_ASSERT(ret == true);
1637 }
1638 
1639 static void
1640 test_identify_ctrlr(void)
1641 {
1642 	struct spdk_nvmf_tgt tgt = {};
1643 	struct spdk_nvmf_subsystem subsystem = {
1644 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1645 		.tgt = &tgt,
1646 	};
1647 	struct spdk_nvmf_transport_ops tops = {};
1648 	struct spdk_nvmf_transport transport = {
1649 		.ops = &tops,
1650 		.opts = {
1651 			.in_capsule_data_size = 4096,
1652 		},
1653 	};
1654 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1655 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1656 	struct spdk_nvme_ctrlr_data cdata = {};
1657 	uint32_t expected_ioccsz;
1658 
1659 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1660 
1661 	/* Check ioccsz, TCP transport */
1662 	tops.type = SPDK_NVME_TRANSPORT_TCP;
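	/* ioccsz is reported in 16-byte units: a 64-byte SQE (4 units) plus
	 * 4096 bytes of in-capsule data (256 units) = 260 units.
	 */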
1663 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1664 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1665 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1666 
1667 	/* Check ioccsz, RDMA transport */
1668 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1669 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1670 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1671 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1672 
1673 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1674 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1675 	ctrlr.dif_insert_or_strip = true;
1676 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1677 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1678 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1679 }
1680 
1681 static void
1682 test_identify_ctrlr_iocs_specific(void)
1683 {
1684 	struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 };
1685 	struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 };
1686 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop };
1687 	struct spdk_nvme_cmd cmd = {};
1688 	struct spdk_nvme_cpl rsp = {};
1689 	struct spdk_nvme_zns_ctrlr_data ctrlr_data = {};
1690 
1691 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;
1692 
	/* ZNS: max_zone_append_size_kib == 0 (no limit) */
1694 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1695 	memset(&rsp, 0, sizeof(rsp));
1696 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1697 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1698 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1699 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1700 	CU_ASSERT(ctrlr_data.zasl == 0);
1701 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1702 
1703 	/* ZNS max_zone_append_size_kib = 4096 */
1704 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1705 	memset(&rsp, 0, sizeof(rsp));
1706 	subsystem.max_zone_append_size_kib = 4096;
1707 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1708 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1709 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1710 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1711 	CU_ASSERT(ctrlr_data.zasl == 0);
1712 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1713 
1714 	/* ZNS max_zone_append_size_kib = 60000 */
1715 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1716 	memset(&rsp, 0, sizeof(rsp));
1717 	subsystem.max_zone_append_size_kib = 60000;
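	/* ZASL is a power-of-two exponent; with mpsmin == 0 the 60000 KiB limit
	 * is expected to yield zasl == 3, i.e. log2(60000 >> 12) at this revision.
	 */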
1718 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1719 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1720 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1721 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1722 	CU_ASSERT(ctrlr_data.zasl == 3);
1723 	ctrlr_data.zasl = 0;
1724 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1725 
1726 	/* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */
1727 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1728 	memset(&rsp, 0, sizeof(rsp));
1729 	ctrlr.vcprop.cap.bits.mpsmin = 2;
1730 	subsystem.max_zone_append_size_kib = 60000;
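	/* Raising CAP.MPSMIN to 2 scales up the page-size unit, so the reported
	 * exponent is expected to drop from 3 to 1.
	 */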
1731 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1732 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1733 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1734 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1735 	CU_ASSERT(ctrlr_data.zasl == 1);
1736 	ctrlr_data.zasl = 0;
1737 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1738 	ctrlr.vcprop.cap.bits.mpsmin = 0;
1739 
1740 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;
1741 
1742 	/* NVM */
1743 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1744 	memset(&rsp, 0, sizeof(rsp));
1745 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1746 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1747 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1748 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1749 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1750 }
1751 
1752 static int
1753 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1754 {
1755 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1756 
1757 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1758 };
1759 
1760 static void
1761 test_custom_admin_cmd(void)
1762 {
1763 	struct spdk_nvmf_subsystem subsystem;
1764 	struct spdk_nvmf_qpair qpair;
1765 	struct spdk_nvmf_ctrlr ctrlr;
1766 	struct spdk_nvmf_request req;
1767 	struct spdk_nvmf_ns *ns_ptrs[1];
1768 	struct spdk_nvmf_ns ns;
1769 	union nvmf_h2c_msg cmd;
1770 	union nvmf_c2h_msg rsp;
1771 	struct spdk_bdev bdev;
1772 	uint8_t buf[4096];
1773 	int rc;
1774 
1775 	memset(&subsystem, 0, sizeof(subsystem));
1776 	ns_ptrs[0] = &ns;
1777 	subsystem.ns = ns_ptrs;
1778 	subsystem.max_nsid = 1;
1779 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1780 
1781 	memset(&ns, 0, sizeof(ns));
1782 	ns.opts.nsid = 1;
1783 	ns.bdev = &bdev;
1784 
1785 	memset(&qpair, 0, sizeof(qpair));
1786 	qpair.ctrlr = &ctrlr;
1787 
1788 	memset(&ctrlr, 0, sizeof(ctrlr));
1789 	ctrlr.subsys = &subsystem;
1790 	ctrlr.vcprop.cc.bits.en = 1;
1791 	ctrlr.thread = spdk_get_thread();
1792 
1793 	memset(&req, 0, sizeof(req));
1794 	req.qpair = &qpair;
1795 	req.cmd = &cmd;
1796 	req.rsp = &rsp;
1797 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1798 	req.data = buf;
1799 	req.length = sizeof(buf);
1800 	spdk_iov_one(req.iov, &req.iovcnt, &buf, req.length);
1801 
1802 	memset(&cmd, 0, sizeof(cmd));
1803 	cmd.nvme_cmd.opc = 0xc1;
1804 	cmd.nvme_cmd.nsid = 0;
1805 	memset(&rsp, 0, sizeof(rsp));
1806 
1807 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1808 
	/* Ensure that our custom handler is called */
1810 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1811 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1812 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1813 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1814 }
1815 
1816 static void
1817 test_fused_compare_and_write(void)
1818 {
1819 	struct spdk_nvmf_request req = {};
1820 	struct spdk_nvmf_qpair qpair = {};
1821 	struct spdk_nvme_cmd cmd = {};
1822 	union nvmf_c2h_msg rsp = {};
1823 	struct spdk_nvmf_ctrlr ctrlr = {};
1824 	struct spdk_nvmf_subsystem subsystem = {};
1825 	struct spdk_nvmf_ns ns = {};
1826 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1827 	enum spdk_nvme_ana_state ana_state[1];
1828 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1829 	struct spdk_bdev bdev = {};
1830 
1831 	struct spdk_nvmf_poll_group group = {};
1832 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1833 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1834 	struct spdk_io_channel io_ch = {};
1835 
1836 	ns.bdev = &bdev;
1837 	ns.anagrpid = 1;
1838 
1839 	subsystem.id = 0;
1840 	subsystem.max_nsid = 1;
1841 	subsys_ns[0] = &ns;
1842 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1843 
1844 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1845 
1846 	/* Enable controller */
1847 	ctrlr.vcprop.cc.bits.en = 1;
1848 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1849 	ctrlr.listener = &listener;
1850 
1851 	group.num_sgroups = 1;
1852 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1853 	sgroups.num_ns = 1;
1854 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1855 	ns_info.channel = &io_ch;
1856 	sgroups.ns_info = &ns_info;
1857 	TAILQ_INIT(&sgroups.queued);
1858 	group.sgroups = &sgroups;
1859 	TAILQ_INIT(&qpair.outstanding);
1860 
1861 	qpair.ctrlr = &ctrlr;
1862 	qpair.group = &group;
1863 	qpair.qid = 1;
1864 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1865 
1866 	cmd.nsid = 1;
1867 
1868 	req.qpair = &qpair;
1869 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1870 	req.rsp = &rsp;
1871 
	/* SUCCESS/SUCCESS: valid fused pair, COMPARE first then WRITE second */
1873 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1874 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1875 
1876 	spdk_nvmf_request_exec(&req);
1877 	CU_ASSERT(qpair.first_fused_req != NULL);
1878 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1879 
1880 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1881 	cmd.opc = SPDK_NVME_OPC_WRITE;
1882 
1883 	spdk_nvmf_request_exec(&req);
1884 	CU_ASSERT(qpair.first_fused_req == NULL);
1885 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1886 
1887 	/* Wrong sequence */
1888 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1889 	cmd.opc = SPDK_NVME_OPC_WRITE;
1890 
1891 	spdk_nvmf_request_exec(&req);
1892 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1893 	CU_ASSERT(qpair.first_fused_req == NULL);
1894 
1895 	/* Write as FUSE_FIRST (Wrong op code) */
1896 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1897 	cmd.opc = SPDK_NVME_OPC_WRITE;
1898 
1899 	spdk_nvmf_request_exec(&req);
1900 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1901 	CU_ASSERT(qpair.first_fused_req == NULL);
1902 
	/* Compare as FUSE_SECOND (Wrong op code): start a valid fused pair, then send COMPARE as the second command */
1904 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1905 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1906 
1907 	spdk_nvmf_request_exec(&req);
1908 	CU_ASSERT(qpair.first_fused_req != NULL);
1909 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1910 
1911 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1912 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1913 
1914 	spdk_nvmf_request_exec(&req);
1915 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1916 	CU_ASSERT(qpair.first_fused_req == NULL);
1917 }
1918 
1919 static void
1920 test_multi_async_event_reqs(void)
1921 {
1922 	struct spdk_nvmf_subsystem subsystem = {};
1923 	struct spdk_nvmf_qpair qpair = {};
1924 	struct spdk_nvmf_ctrlr ctrlr = {};
1925 	struct spdk_nvmf_request req[5] = {};
1926 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1927 	struct spdk_nvmf_ns ns = {};
1928 	union nvmf_h2c_msg cmd[5] = {};
1929 	union nvmf_c2h_msg rsp[5] = {};
1930 
1931 	struct spdk_nvmf_poll_group group = {};
1932 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1933 
1934 	int i;
1935 
1936 	ns_ptrs[0] = &ns;
1937 	subsystem.ns = ns_ptrs;
1938 	subsystem.max_nsid = 1;
1939 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1940 
1941 	ns.opts.nsid = 1;
1942 	group.sgroups = &sgroups;
1943 
1944 	qpair.ctrlr = &ctrlr;
1945 	qpair.group = &group;
1946 	TAILQ_INIT(&qpair.outstanding);
1947 
1948 	ctrlr.subsys = &subsystem;
1949 	ctrlr.vcprop.cc.bits.en = 1;
1950 	ctrlr.thread = spdk_get_thread();
1951 
1952 	for (i = 0; i < 5; i++) {
1953 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1954 		cmd[i].nvme_cmd.nsid = 1;
1955 		cmd[i].nvme_cmd.cid = i;
1956 
1957 		req[i].qpair = &qpair;
1958 		req[i].cmd = &cmd[i];
1959 		req[i].rsp = &rsp[i];
1960 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1961 	}
1962 
	/* The target can store up to SPDK_NVMF_MAX_ASYNC_EVENTS requests */
1964 	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
1965 	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
1966 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1967 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
1968 	}
1969 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1970 
	/* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
1976 
	/* Test that the aer_req array stays contiguous when a request in the middle is aborted */
1978 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
1979 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1980 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1981 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
1982 
1983 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
1984 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1985 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1986 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
1987 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
1988 
1989 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
1990 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
1991 }
1992 
1993 static void
1994 test_get_ana_log_page_one_ns_per_anagrp(void)
1995 {
1996 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
1997 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
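/* Each of the three ANA group descriptors carries a single 4-byte NSID,
 * hence one descriptor plus one uint32_t per group.
 */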
1998 	uint32_t ana_group[3];
1999 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
2000 	struct spdk_nvmf_ctrlr ctrlr = {};
2001 	enum spdk_nvme_ana_state ana_state[3];
2002 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2003 	struct spdk_nvmf_ns ns[3];
2004 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
2005 	uint64_t offset;
2006 	uint32_t length;
2007 	int i;
2008 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2009 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2010 	struct iovec iov, iovs[2];
2011 	struct spdk_nvme_ana_page *ana_hdr;
2012 	char _ana_desc[UT_ANA_DESC_SIZE];
2013 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2014 
2015 	subsystem.ns = ns_arr;
2016 	subsystem.max_nsid = 3;
2017 	for (i = 0; i < 3; i++) {
2018 		subsystem.ana_group[i] = 1;
2019 	}
2020 	ctrlr.subsys = &subsystem;
2021 	ctrlr.listener = &listener;
2022 
2023 	for (i = 0; i < 3; i++) {
2024 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2025 	}
2026 
2027 	for (i = 0; i < 3; i++) {
2028 		ns_arr[i]->nsid = i + 1;
2029 		ns_arr[i]->anagrpid = i + 1;
2030 	}
2031 
2032 	/* create expected page */
2033 	ana_hdr = (void *)&expected_page[0];
2034 	ana_hdr->num_ana_group_desc = 3;
2035 	ana_hdr->change_count = 0;
2036 
	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2038 	ana_desc = (void *)_ana_desc;
2039 	offset = sizeof(struct spdk_nvme_ana_page);
2040 
2041 	for (i = 0; i < 3; i++) {
2042 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
2043 		ana_desc->ana_group_id = ns_arr[i]->nsid;
2044 		ana_desc->num_of_nsid = 1;
2045 		ana_desc->change_count = 0;
2046 		ana_desc->ana_state = ctrlr.listener->ana_state[i];
2047 		ana_desc->nsid[0] = ns_arr[i]->nsid;
2048 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
2049 		offset += UT_ANA_DESC_SIZE;
2050 	}
2051 
2052 	/* read entire actual log page */
2053 	offset = 0;
2054 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2055 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2056 		iov.iov_base = &actual_page[offset];
2057 		iov.iov_len = length;
2058 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2059 		offset += length;
2060 	}
2061 
2062 	/* compare expected page and actual page */
2063 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2064 
2065 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
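	/* Read again with the page split across two iovecs, the boundary falling inside a descriptor */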
2066 	offset = 0;
2067 	iovs[0].iov_base = &actual_page[offset];
2068 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2069 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2070 	iovs[1].iov_base = &actual_page[offset];
2071 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
2072 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2073 
2074 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2075 
2076 #undef UT_ANA_DESC_SIZE
2077 #undef UT_ANA_LOG_PAGE_SIZE
2078 }
2079 
2080 static void
2081 test_get_ana_log_page_multi_ns_per_anagrp(void)
2082 {
2083 #define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
2084 				 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
2085 				 sizeof(uint32_t) * 5)
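/* Two descriptors share five NSIDs in total: ANA group 2 holds NSIDs 1, 3 and 5,
 * and group 3 holds NSIDs 2 and 4.
 */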
2086 	struct spdk_nvmf_ns ns[5];
2087 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
2088 	uint32_t ana_group[5] = {0};
2089 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
2090 	enum spdk_nvme_ana_state ana_state[5];
2091 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
2092 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
2093 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2094 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2095 	struct iovec iov, iovs[2];
2096 	struct spdk_nvme_ana_page *ana_hdr;
2097 	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
2098 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2099 	uint64_t offset;
2100 	uint32_t length;
2101 	int i;
2102 
2103 	subsystem.max_nsid = 5;
2104 	subsystem.ana_group[1] = 3;
2105 	subsystem.ana_group[2] = 2;
2106 	for (i = 0; i < 5; i++) {
2107 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2108 	}
2109 
2110 	for (i = 0; i < 5; i++) {
2111 		ns_arr[i]->nsid = i + 1;
2112 	}
2113 	ns_arr[0]->anagrpid = 2;
2114 	ns_arr[1]->anagrpid = 3;
2115 	ns_arr[2]->anagrpid = 2;
2116 	ns_arr[3]->anagrpid = 3;
2117 	ns_arr[4]->anagrpid = 2;
2118 
2119 	/* create expected page */
2120 	ana_hdr = (void *)&expected_page[0];
2121 	ana_hdr->num_ana_group_desc = 2;
2122 	ana_hdr->change_count = 0;
2123 
	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2125 	ana_desc = (void *)_ana_desc;
2126 	offset = sizeof(struct spdk_nvme_ana_page);
2127 
2128 	memset(_ana_desc, 0, sizeof(_ana_desc));
2129 	ana_desc->ana_group_id = 2;
2130 	ana_desc->num_of_nsid = 3;
2131 	ana_desc->change_count = 0;
2132 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2133 	ana_desc->nsid[0] = 1;
2134 	ana_desc->nsid[1] = 3;
2135 	ana_desc->nsid[2] = 5;
2136 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2137 	       sizeof(uint32_t) * 3);
2138 	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;
2139 
2140 	memset(_ana_desc, 0, sizeof(_ana_desc));
2141 	ana_desc->ana_group_id = 3;
2142 	ana_desc->num_of_nsid = 2;
2143 	ana_desc->change_count = 0;
2144 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2145 	ana_desc->nsid[0] = 2;
2146 	ana_desc->nsid[1] = 4;
2147 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2148 	       sizeof(uint32_t) * 2);
2149 
2150 	/* read entire actual log page, and compare expected page and actual page. */
2151 	offset = 0;
2152 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2153 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2154 		iov.iov_base = &actual_page[offset];
2155 		iov.iov_len = length;
2156 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2157 		offset += length;
2158 	}
2159 
2160 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2161 
2162 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
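	/* Read again with the page split across two iovecs, the boundary falling inside a descriptor */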
2163 	offset = 0;
2164 	iovs[0].iov_base = &actual_page[offset];
2165 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2166 	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2167 	iovs[1].iov_base = &actual_page[offset];
2168 	iovs[1].iov_len = sizeof(uint32_t) * 5;
2169 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2170 
2171 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2172 
2173 #undef UT_ANA_LOG_PAGE_SIZE
2174 }
2175 static void
2176 test_multi_async_events(void)
2177 {
2178 	struct spdk_nvmf_subsystem subsystem = {};
2179 	struct spdk_nvmf_qpair qpair = {};
2180 	struct spdk_nvmf_ctrlr ctrlr = {};
2181 	struct spdk_nvmf_request req[4] = {};
2182 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2183 	struct spdk_nvmf_ns ns = {};
2184 	union nvmf_h2c_msg cmd[4] = {};
2185 	union nvmf_c2h_msg rsp[4] = {};
2186 	union spdk_nvme_async_event_completion event = {};
2187 	struct spdk_nvmf_poll_group group = {};
2188 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2189 	int i;
2190 
2191 	ns_ptrs[0] = &ns;
2192 	subsystem.ns = ns_ptrs;
2193 	subsystem.max_nsid = 1;
2194 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2195 
2196 	ns.opts.nsid = 1;
2197 	group.sgroups = &sgroups;
2198 
2199 	qpair.ctrlr = &ctrlr;
2200 	qpair.group = &group;
2201 	TAILQ_INIT(&qpair.outstanding);
2202 
2203 	ctrlr.subsys = &subsystem;
2204 	ctrlr.vcprop.cc.bits.en = 1;
2205 	ctrlr.thread = spdk_get_thread();
2206 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2207 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2208 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2209 	init_pending_async_events(&ctrlr);
2210 
	/* The target queues pending events when there is no outstanding AER request */
2212 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2213 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2214 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2215 
2216 	for (i = 0; i < 4; i++) {
2217 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2218 		cmd[i].nvme_cmd.nsid = 1;
2219 		cmd[i].nvme_cmd.cid = i;
2220 
2221 		req[i].qpair = &qpair;
2222 		req[i].cmd = &cmd[i];
2223 		req[i].rsp = &rsp[i];
2224 
2225 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2226 
2227 		sgroups.mgmt_io_outstanding = 1;
2228 		if (i < 3) {
2229 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2230 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2231 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2232 		} else {
2233 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2234 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2235 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2236 		}
2237 	}
2238 
2239 	event.raw = rsp[0].nvme_cpl.cdw0;
2240 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2241 	event.raw = rsp[1].nvme_cpl.cdw0;
2242 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2243 	event.raw = rsp[2].nvme_cpl.cdw0;
2244 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2245 
2246 	cleanup_pending_async_events(&ctrlr);
2247 }
2248 
2249 static void
2250 test_rae(void)
2251 {
2252 	struct spdk_nvmf_subsystem subsystem = {};
2253 	struct spdk_nvmf_qpair qpair = {};
2254 	struct spdk_nvmf_ctrlr ctrlr = {};
2255 	struct spdk_nvmf_request req[3] = {};
2256 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2257 	struct spdk_nvmf_ns ns = {};
2258 	union nvmf_h2c_msg cmd[3] = {};
2259 	union nvmf_c2h_msg rsp[3] = {};
2260 	union spdk_nvme_async_event_completion event = {};
2261 	struct spdk_nvmf_poll_group group = {};
2262 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2263 	int i;
2264 	char data[4096];
2265 
2266 	ns_ptrs[0] = &ns;
2267 	subsystem.ns = ns_ptrs;
2268 	subsystem.max_nsid = 1;
2269 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2270 
2271 	ns.opts.nsid = 1;
2272 	group.sgroups = &sgroups;
2273 
2274 	qpair.ctrlr = &ctrlr;
2275 	qpair.group = &group;
2276 	TAILQ_INIT(&qpair.outstanding);
2277 
2278 	ctrlr.subsys = &subsystem;
2279 	ctrlr.vcprop.cc.bits.en = 1;
2280 	ctrlr.thread = spdk_get_thread();
2281 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2282 	init_pending_async_events(&ctrlr);
2283 
	/* The target queues pending events when there is no outstanding AER request */
2285 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2286 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2287 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	/* Only one event is queued until RAE is cleared */
2289 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2290 
2291 	req[0].qpair = &qpair;
2292 	req[0].cmd = &cmd[0];
2293 	req[0].rsp = &rsp[0];
2294 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2295 	cmd[0].nvme_cmd.nsid = 1;
2296 	cmd[0].nvme_cmd.cid = 0;
2297 
2298 	for (i = 1; i < 3; i++) {
2299 		req[i].qpair = &qpair;
2300 		req[i].cmd = &cmd[i];
2301 		req[i].rsp = &rsp[i];
2302 		req[i].data = &data;
2303 		req[i].length = sizeof(data);
2304 		spdk_iov_one(req[i].iov, &req[i].iovcnt, &data, req[i].length);
2305 
2306 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2307 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2308 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2309 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2310 			spdk_nvme_bytes_to_numd(req[i].length);
2311 		cmd[i].nvme_cmd.cid = i;
2312 	}
2313 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2314 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
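	/* req[1] reads the log with RAE set, retaining the async event state;
	 * req[2] reads it with RAE clear, releasing that state.
	 */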
2315 
2316 	/* consume the pending event */
2317 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2318 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2319 	event.raw = rsp[0].nvme_cpl.cdw0;
2320 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2321 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2322 
2323 	/* get log with RAE set */
2324 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2325 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2326 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2327 
	/* No new event is generated until RAE is cleared */
2329 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2330 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2331 
2332 	/* get log with RAE clear */
2333 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2334 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2335 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2336 
2337 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2338 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2339 
2340 	cleanup_pending_async_events(&ctrlr);
2341 }
2342 
2343 static void
2344 test_nvmf_ctrlr_create_destruct(void)
2345 {
2346 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2347 	struct spdk_nvmf_poll_group group = {};
2348 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2349 	struct spdk_nvmf_transport transport = {};
2350 	struct spdk_nvmf_transport_ops tops = {};
2351 	struct spdk_nvmf_subsystem subsystem = {};
2352 	struct spdk_nvmf_request req = {};
2353 	struct spdk_nvmf_qpair qpair = {};
2354 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2355 	struct spdk_nvmf_tgt tgt = {};
2356 	union nvmf_h2c_msg cmd = {};
2357 	union nvmf_c2h_msg rsp = {};
2358 	const uint8_t hostid[16] = {
2359 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2360 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2361 	};
2362 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2363 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2364 
2365 	group.thread = spdk_get_thread();
2366 	transport.ops = &tops;
2367 	transport.opts.max_aq_depth = 32;
2368 	transport.opts.max_queue_depth = 64;
2369 	transport.opts.max_qpairs_per_ctrlr = 3;
2370 	transport.opts.dif_insert_or_strip = true;
2371 	transport.tgt = &tgt;
2372 	qpair.transport = &transport;
2373 	qpair.group = &group;
2374 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2375 	TAILQ_INIT(&qpair.outstanding);
2376 
2377 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2378 	connect_data.cntlid = 0xFFFF;
2379 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2380 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2381 
2382 	subsystem.thread = spdk_get_thread();
2383 	subsystem.id = 1;
2384 	TAILQ_INIT(&subsystem.ctrlrs);
2385 	subsystem.tgt = &tgt;
2386 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2387 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2388 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2389 
2390 	group.sgroups = sgroups;
2391 
2392 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2393 	cmd.connect_cmd.cid = 1;
2394 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2395 	cmd.connect_cmd.recfmt = 0;
2396 	cmd.connect_cmd.qid = 0;
2397 	cmd.connect_cmd.sqsize = 31;
2398 	cmd.connect_cmd.cattr = 0;
2399 	cmd.connect_cmd.kato = 120000;
2400 
2401 	req.qpair = &qpair;
2402 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2403 	req.data = &connect_data;
2404 	req.length = sizeof(connect_data);
2405 	spdk_iov_one(req.iov, &req.iovcnt, &connect_data, req.length);
2406 	req.cmd = &cmd;
2407 	req.rsp = &rsp;
2408 
2409 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2410 	sgroups[subsystem.id].mgmt_io_outstanding++;
2411 
2412 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base);
2413 	poll_threads();
2414 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2415 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2416 	CU_ASSERT(ctrlr->subsys == &subsystem);
2417 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2418 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2419 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2420 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2421 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2422 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2423 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2424 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
	/* memcmp, not strncmp: the host ID is raw bytes and begins with 0x00 */
	CU_ASSERT(!memcmp(&ctrlr->hostid, hostid, 16));
2426 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2427 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2428 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500); /* CAP.TO is in 500 ms units */
2430 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2431 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2432 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2433 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2434 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2435 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2436 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2437 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2438 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2439 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2440 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2441 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2442 
2443 	ctrlr->in_destruct = true;
2444 	nvmf_ctrlr_destruct(ctrlr);
2445 	poll_threads();
2446 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2447 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2448 }
2449 
2450 static void
2451 test_nvmf_ctrlr_use_zcopy(void)
2452 {
2453 	struct spdk_nvmf_subsystem subsystem = {};
2454 	struct spdk_nvmf_transport transport = {};
2455 	struct spdk_nvmf_request req = {};
2456 	struct spdk_nvmf_qpair qpair = {};
2457 	struct spdk_nvmf_ctrlr ctrlr = {};
2458 	union nvmf_h2c_msg cmd = {};
2459 	struct spdk_nvmf_ns ns = {};
2460 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2461 	struct spdk_bdev bdev = {};
2462 	struct spdk_nvmf_poll_group group = {};
2463 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2464 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2465 	struct spdk_io_channel io_ch = {};
2466 	int opc;
2467 
2468 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2469 	ns.bdev = &bdev;
2470 
2471 	subsystem.id = 0;
2472 	subsystem.max_nsid = 1;
2473 	subsys_ns[0] = &ns;
2474 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2475 
2476 	ctrlr.subsys = &subsystem;
2477 
2478 	transport.opts.zcopy = true;
2479 
2480 	qpair.ctrlr = &ctrlr;
2481 	qpair.group = &group;
2482 	qpair.qid = 1;
2483 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2484 	qpair.transport = &transport;
2485 
2486 	group.thread = spdk_get_thread();
2487 	group.num_sgroups = 1;
2488 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2489 	sgroups.num_ns = 1;
2490 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2491 	ns_info.channel = &io_ch;
2492 	sgroups.ns_info = &ns_info;
2493 	TAILQ_INIT(&sgroups.queued);
2494 	group.sgroups = &sgroups;
2495 	TAILQ_INIT(&qpair.outstanding);
2496 
2497 	req.qpair = &qpair;
2498 	req.cmd = &cmd;
2499 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2500 
2501 	/* Admin queue */
2502 	qpair.qid = 0;
2503 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2504 	qpair.qid = 1;
2505 
2506 	/* Invalid Opcodes */
2507 	for (opc = 0; opc <= 255; opc++) {
2508 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2509 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2510 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2511 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2512 		}
2513 	}
2514 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2515 
	/* Fused WRITE: fused commands cannot use zcopy */
2517 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2518 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2519 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2520 
	/* No namespace (and thus no bdev) for this NSID */
2522 	cmd.nvme_cmd.nsid = 4;
2523 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2524 	cmd.nvme_cmd.nsid = 1;
2525 
	/* ZCOPY not supported by the namespace */
2527 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2528 	ns.zcopy = true;
2529 
	/* ZCOPY disabled at the transport level */
2531 	transport.opts.zcopy = false;
2532 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2533 	transport.opts.zcopy = true;
2534 
2535 	/* Success */
2536 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2537 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2538 }
2539 
2540 static void
2541 qpair_state_change_done(void *cb_arg, int status)
2542 {
2543 }
2544 
2545 static void
2546 test_spdk_nvmf_request_zcopy_start(void)
2547 {
2548 	struct spdk_nvmf_request req = {};
2549 	struct spdk_nvmf_qpair qpair = {};
2550 	struct spdk_nvmf_transport transport = {};
2551 	struct spdk_nvme_cmd cmd = {};
2552 	union nvmf_c2h_msg rsp = {};
2553 	struct spdk_nvmf_ctrlr ctrlr = {};
2554 	struct spdk_nvmf_subsystem subsystem = {};
2555 	struct spdk_nvmf_ns ns = {};
2556 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2557 	enum spdk_nvme_ana_state ana_state[1];
2558 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2559 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2560 
2561 	struct spdk_nvmf_poll_group group = {};
2562 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2563 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2564 	struct spdk_io_channel io_ch = {};
2565 
2566 	ns.bdev = &bdev;
2567 	ns.zcopy = true;
2568 	ns.anagrpid = 1;
2569 
2570 	subsystem.id = 0;
2571 	subsystem.max_nsid = 1;
2572 	subsys_ns[0] = &ns;
2573 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2574 
2575 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2576 
2577 	/* Enable controller */
2578 	ctrlr.vcprop.cc.bits.en = 1;
2579 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2580 	ctrlr.listener = &listener;
2581 
2582 	transport.opts.zcopy = true;
2583 
2584 	group.thread = spdk_get_thread();
2585 	group.num_sgroups = 1;
2586 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2587 	sgroups.num_ns = 1;
2588 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2589 	ns_info.channel = &io_ch;
2590 	sgroups.ns_info = &ns_info;
2591 	TAILQ_INIT(&sgroups.queued);
2592 	group.sgroups = &sgroups;
2593 	TAILQ_INIT(&qpair.outstanding);
2594 
2595 	qpair.ctrlr = &ctrlr;
2596 	qpair.group = &group;
2597 	qpair.transport = &transport;
2598 	qpair.qid = 1;
2599 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2600 
2601 	cmd.nsid = 1;
2602 
2603 	req.qpair = &qpair;
2604 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2605 	req.rsp = &rsp;
2606 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2607 	cmd.opc = SPDK_NVME_OPC_READ;
2608 
2609 	/* Fail because no controller */
2610 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2611 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2612 	qpair.ctrlr = NULL;
2613 	spdk_nvmf_request_zcopy_start(&req);
2614 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2615 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2616 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
2617 	qpair.ctrlr = &ctrlr;
2618 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2619 
2620 	/* Fail because bad NSID */
2621 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2622 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2623 	cmd.nsid = 0;
2624 	spdk_nvmf_request_zcopy_start(&req);
2625 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2626 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2627 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2628 	cmd.nsid = 1;
2629 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2630 
2631 	/* Fail because bad Channel */
2632 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2633 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2634 	ns_info.channel = NULL;
2635 	spdk_nvmf_request_zcopy_start(&req);
2636 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2637 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2638 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2639 	ns_info.channel = &io_ch;
2640 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2641 
	/* Queue the request because the NSID is not active */
2643 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2644 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2645 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2646 	spdk_nvmf_request_zcopy_start(&req);
2647 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
2648 	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
2649 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2650 	TAILQ_REMOVE(&sgroups.queued, &req, link);
2651 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2652 
2653 	/* Fail because QPair is not active */
2654 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2655 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2656 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2657 	qpair.state_cb = qpair_state_change_done;
2658 	spdk_nvmf_request_zcopy_start(&req);
2659 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
2660 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2661 	qpair.state_cb = NULL;
2662 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2663 
2664 	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails */
2665 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2666 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2667 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2668 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2669 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
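	/* SLBA == blockcnt with a 101-block transfer runs past the end of the bdev */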
2670 	spdk_nvmf_request_zcopy_start(&req);
2671 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2672 	cmd.cdw10 = 0;
2673 	cmd.cdw12 = 0;
2674 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2675 
2676 	/* Success */
2677 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2678 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2679 	spdk_nvmf_request_zcopy_start(&req);
2680 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2681 }
2682 
2683 static void
2684 test_zcopy_read(void)
2685 {
2686 	struct spdk_nvmf_request req = {};
2687 	struct spdk_nvmf_qpair qpair = {};
2688 	struct spdk_nvmf_transport transport = {};
2689 	struct spdk_nvme_cmd cmd = {};
2690 	union nvmf_c2h_msg rsp = {};
2691 	struct spdk_nvmf_ctrlr ctrlr = {};
2692 	struct spdk_nvmf_subsystem subsystem = {};
2693 	struct spdk_nvmf_ns ns = {};
2694 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2695 	enum spdk_nvme_ana_state ana_state[1];
2696 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2697 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2698 
2699 	struct spdk_nvmf_poll_group group = {};
2700 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2701 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2702 	struct spdk_io_channel io_ch = {};
2703 
2704 	ns.bdev = &bdev;
2705 	ns.zcopy = true;
2706 	ns.anagrpid = 1;
2707 
2708 	subsystem.id = 0;
2709 	subsystem.max_nsid = 1;
2710 	subsys_ns[0] = &ns;
2711 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2712 
2713 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2714 
2715 	/* Enable controller */
2716 	ctrlr.vcprop.cc.bits.en = 1;
2717 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2718 	ctrlr.listener = &listener;
2719 
2720 	transport.opts.zcopy = true;
2721 
2722 	group.thread = spdk_get_thread();
2723 	group.num_sgroups = 1;
2724 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2725 	sgroups.num_ns = 1;
2726 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2727 	ns_info.channel = &io_ch;
2728 	sgroups.ns_info = &ns_info;
2729 	TAILQ_INIT(&sgroups.queued);
2730 	group.sgroups = &sgroups;
2731 	TAILQ_INIT(&qpair.outstanding);
2732 
2733 	qpair.ctrlr = &ctrlr;
2734 	qpair.group = &group;
2735 	qpair.transport = &transport;
2736 	qpair.qid = 1;
2737 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2738 
2739 	cmd.nsid = 1;
2740 
2741 	req.qpair = &qpair;
2742 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2743 	req.rsp = &rsp;
2744 	cmd.opc = SPDK_NVME_OPC_READ;
2745 
2746 	/* Prepare for zcopy */
2747 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2748 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2749 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2750 	CU_ASSERT(ns_info.io_outstanding == 0);
2751 
2752 	/* Perform the zcopy start */
2753 	spdk_nvmf_request_zcopy_start(&req);
2754 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2755 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2756 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2757 	CU_ASSERT(ns_info.io_outstanding == 1);
2758 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2759 
2760 	/* Perform the zcopy end */
2761 	spdk_nvmf_request_zcopy_end(&req, false);
2762 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2763 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2764 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2765 	CU_ASSERT(ns_info.io_outstanding == 0);
2766 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2767 }
2768 
2769 static void
2770 test_zcopy_write(void)
2771 {
2772 	struct spdk_nvmf_request req = {};
2773 	struct spdk_nvmf_qpair qpair = {};
2774 	struct spdk_nvmf_transport transport = {};
2775 	struct spdk_nvme_cmd cmd = {};
2776 	union nvmf_c2h_msg rsp = {};
2777 	struct spdk_nvmf_ctrlr ctrlr = {};
2778 	struct spdk_nvmf_subsystem subsystem = {};
2779 	struct spdk_nvmf_ns ns = {};
2780 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2781 	enum spdk_nvme_ana_state ana_state[1];
2782 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2783 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2784 
2785 	struct spdk_nvmf_poll_group group = {};
2786 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2787 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2788 	struct spdk_io_channel io_ch = {};
2789 
2790 	ns.bdev = &bdev;
2791 	ns.zcopy = true;
2792 	ns.anagrpid = 1;
2793 
2794 	subsystem.id = 0;
2795 	subsystem.max_nsid = 1;
2796 	subsys_ns[0] = &ns;
2797 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2798 
2799 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2800 
2801 	/* Enable controller */
2802 	ctrlr.vcprop.cc.bits.en = 1;
2803 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2804 	ctrlr.listener = &listener;
2805 
2806 	transport.opts.zcopy = true;
2807 
2808 	group.thread = spdk_get_thread();
2809 	group.num_sgroups = 1;
2810 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2811 	sgroups.num_ns = 1;
2812 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2813 	ns_info.channel = &io_ch;
2814 	sgroups.ns_info = &ns_info;
2815 	TAILQ_INIT(&sgroups.queued);
2816 	group.sgroups = &sgroups;
2817 	TAILQ_INIT(&qpair.outstanding);
2818 
2819 	qpair.ctrlr = &ctrlr;
2820 	qpair.group = &group;
2821 	qpair.transport = &transport;
2822 	qpair.qid = 1;
2823 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2824 
2825 	cmd.nsid = 1;
2826 
2827 	req.qpair = &qpair;
2828 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2829 	req.rsp = &rsp;
2830 	cmd.opc = SPDK_NVME_OPC_WRITE;
2831 
2832 	/* Prepare for zcopy */
2833 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2834 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2835 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2836 	CU_ASSERT(ns_info.io_outstanding == 0);
2837 
2838 	/* Perform the zcopy start */
2839 	spdk_nvmf_request_zcopy_start(&req);
2840 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2841 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2842 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2843 	CU_ASSERT(ns_info.io_outstanding == 1);
2844 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2845 
2846 	/* Perform the zcopy end */
2847 	spdk_nvmf_request_zcopy_end(&req, true);
2848 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2849 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2850 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2851 	CU_ASSERT(ns_info.io_outstanding == 0);
2852 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2853 }
2854 
2855 static void
2856 test_nvmf_property_set(void)
2857 {
2858 	int rc;
2859 	struct spdk_nvmf_request req = {};
2860 	struct spdk_nvmf_qpair qpair = {};
2861 	struct spdk_nvmf_ctrlr ctrlr = {};
2862 	union nvmf_h2c_msg cmd = {};
2863 	union nvmf_c2h_msg rsp = {};
2864 
2865 	req.qpair = &qpair;
2866 	qpair.ctrlr = &ctrlr;
2867 	req.cmd = &cmd;
2868 	req.rsp = &rsp;
2869 
2870 	/* Invalid parameters */
2871 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2872 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
2873 
2874 	rc = nvmf_property_set(&req);
2875 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2876 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2877 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2878 
2879 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);
2880 
2881 	rc = nvmf_property_get(&req);
2882 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2883 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2884 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2885 
	/* Set cc with the matching property size (4 bytes) */
2887 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2888 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);
2889 
2890 	rc = nvmf_property_set(&req);
2891 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2892 
2893 	/* Emulate cc data */
2894 	ctrlr.vcprop.cc.raw = 0xDEADBEEF;
2895 
2896 	rc = nvmf_property_get(&req);
2897 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2898 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);
2899 
	/* Set asq (an 8-byte property) with a 4-byte property size */
2901 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2902 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2903 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);
2904 
2905 	rc = nvmf_property_set(&req);
2906 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2907 
2908 	/* Emulate asq data */
2909 	ctrlr.vcprop.asq = 0xAADDADBEEF;
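	/* A 4-byte get of the 8-byte asq property should return only the low dword */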
2910 
2911 	rc = nvmf_property_get(&req);
2912 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2913 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
2914 }
2915 
2916 static void
2917 test_nvmf_ctrlr_get_features_host_behavior_support(void)
2918 {
2919 	int rc;
2920 	struct spdk_nvmf_request req = {};
2921 	struct spdk_nvmf_qpair qpair = {};
2922 	struct spdk_nvmf_ctrlr ctrlr = {};
2923 	struct spdk_nvme_host_behavior behavior = {};
2924 	union nvmf_h2c_msg cmd = {};
2925 	union nvmf_c2h_msg rsp = {};
2926 
2927 	qpair.ctrlr = &ctrlr;
2928 	req.qpair = &qpair;
2929 	req.cmd = &cmd;
2930 	req.rsp = &rsp;
2931 
2932 	/* Invalid data */
2933 	req.data = NULL;
2934 	req.length = sizeof(struct spdk_nvme_host_behavior);
2935 	req.iovcnt = 0;
2936 
2937 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2938 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2939 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2940 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2941 	CU_ASSERT(req.data == NULL);
2942 
2943 	/* Wrong structure length */
2944 	req.data = &behavior;
2945 	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;
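	/* spdk_iov_one() points req.iov[0] at the buffer and sets req.iovcnt to 1. */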
2946 	spdk_iov_one(req.iov, &req.iovcnt, &behavior, req.length);
2947 
2948 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2949 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2950 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2951 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2952 
2953 	/* Get Features, Host Behavior Support: success case */
2954 	req.data = &behavior;
2955 	req.length = sizeof(struct spdk_nvme_host_behavior);
2956 	spdk_iov_one(req.iov, &req.iovcnt, &behavior, req.length);
2957 
2958 	ctrlr.acre_enabled = true;
2959 	behavior.acre = false;
2960 
2961 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2962 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2963 	CU_ASSERT(behavior.acre == true);
2964 }
2965 
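/*
 * The set handler is the mirror image of the get: it validates the iovec,
 * accepts only 0 or 1 for acre (other values are reserved), and records
 * the result in ctrlr->acre_enabled; anything else is expected to fail
 * with SPDK_NVME_SC_INVALID_FIELD.
 */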
2966 static void
2967 test_nvmf_ctrlr_set_features_host_behavior_support(void)
2968 {
2969 	int rc;
2970 	struct spdk_nvmf_request req = {};
2971 	struct spdk_nvmf_qpair qpair = {};
2972 	struct spdk_nvmf_ctrlr ctrlr = {};
2973 	struct spdk_nvme_host_behavior host_behavior = {};
2974 	union nvmf_h2c_msg cmd = {};
2975 	union nvmf_c2h_msg rsp = {};
2976 
2977 	qpair.ctrlr = &ctrlr;
2978 	req.qpair = &qpair;
2979 	req.cmd = &cmd;
2980 	req.rsp = &rsp;
2981 	req.iov[0].iov_base = &host_behavior;
2982 	req.iov[0].iov_len = sizeof(host_behavior);
2983 
2984 	/* Invalid iovcnt */
2985 	req.iovcnt = 0;
2986 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
2987 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2988 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
2989 
2990 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
2991 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2992 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2993 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2994 
2995 	/* Invalid iov_len */
2996 	req.iovcnt = 1;
2997 	req.iov[0].iov_len = 0;
2998 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
2999 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3000 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3001 
3002 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3003 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3004 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3005 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3006 
3007 	/* acre is false */
3008 	host_behavior.acre = 0;
3009 	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
3010 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3011 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3012 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3013 
3014 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3015 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3016 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3017 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
3018 	CU_ASSERT(ctrlr.acre_enabled == false);
3019 
3020 	/* acre is true */
3021 	host_behavior.acre = 1;
3022 	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
3023 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3024 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3025 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3026 
3027 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3028 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3029 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3030 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
3031 	CU_ASSERT(ctrlr.acre_enabled == true);
3032 
3033 	/* Invalid acre */
3034 	host_behavior.acre = 2;
3035 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3036 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3037 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3038 
3039 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3040 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3041 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3042 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3043 }
3044 
3045 int
3046 main(int argc, char **argv)
3047 {
3048 	CU_pSuite	suite = NULL;
3049 	unsigned int	num_failures;
3050 
3051 	CU_set_error_action(CUEA_ABORT);
3052 	CU_initialize_registry();
3053 
3054 	suite = CU_add_suite("nvmf", NULL, NULL);
3055 	CU_ADD_TEST(suite, test_get_log_page);
3056 	CU_ADD_TEST(suite, test_process_fabrics_cmd);
3057 	CU_ADD_TEST(suite, test_connect);
3058 	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
3059 	CU_ADD_TEST(suite, test_identify_ns);
3060 	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
3061 	CU_ADD_TEST(suite, test_reservation_write_exclusive);
3062 	CU_ADD_TEST(suite, test_reservation_exclusive_access);
3063 	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
3064 	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
3065 	CU_ADD_TEST(suite, test_reservation_notification_log_page);
3066 	CU_ADD_TEST(suite, test_get_dif_ctx);
3067 	CU_ADD_TEST(suite, test_set_get_features);
3068 	CU_ADD_TEST(suite, test_identify_ctrlr);
3069 	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
3070 	CU_ADD_TEST(suite, test_custom_admin_cmd);
3071 	CU_ADD_TEST(suite, test_fused_compare_and_write);
3072 	CU_ADD_TEST(suite, test_multi_async_event_reqs);
3073 	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
3074 	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
3075 	CU_ADD_TEST(suite, test_multi_async_events);
3076 	CU_ADD_TEST(suite, test_rae);
3077 	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
3078 	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
3079 	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
3080 	CU_ADD_TEST(suite, test_zcopy_read);
3081 	CU_ADD_TEST(suite, test_zcopy_write);
3082 	CU_ADD_TEST(suite, test_nvmf_property_set);
3083 	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
3084 	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);
3085 
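	/*
	 * The controller code under test needs an SPDK thread context, so a
	 * single unit-test thread (from common/lib/ut_multithread.c) is
	 * allocated and made current before the tests run.
	 */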
3086 	allocate_threads(1);
3087 	set_thread(0);
3088 
3089 	CU_basic_set_mode(CU_BRM_VERBOSE);
3090 	CU_basic_run_tests();
3091 	num_failures = CU_get_number_of_failures();
3092 	CU_cleanup_registry();
3093 
3094 	free_threads();
3095 
3096 	return num_failures;
3097 }
3098