/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/bdev_zone.h"
#include "spdk/nvme_spec.h"
#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
	bool zoned;
	uint32_t zone_size;
	uint32_t max_open_zones;
	uint32_t max_active_zones;
};

#define MAX_OPEN_ZONES 12
#define MAX_ACTIVE_ZONES 34
#define ZONE_SIZE 56

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

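/*
 * Sentinel bdev_io pointers returned by the stubbed zcopy start path below.
 * They are only ever compared by address (to tell read, write, and failure
 * cases apart) and are never dereferenced.
 */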
static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

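/*
 * DEFINE_STUB(fn, ret, args, val) (from spdk_internal/mock.h) generates a
 * mock implementation of fn that returns val until a test overrides it with
 * MOCK_SET(fn, new_val) and restores it with MOCK_CLEAR(fn); DEFINE_STUB_V
 * is the void-returning variant.
 */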
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
	    MAX_ACTIVE_ZONES);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

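/*
 * Minimal stand-in for the real identify-namespace helper: it advertises a
 * single LBA format of 512-byte blocks (lbads = log2(512) = 9) and sizes the
 * namespace from the mock bdev's block count.
 */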
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

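/*
 * The subsystem's ns array is indexed by nsid - 1; NULL entries represent
 * inactive namespaces and are skipped while iterating.
 */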
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

bool
nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
{
	return ns->ptpl_file != NULL;
}

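/*
 * Get Log Page carries a zero-based dword count split across NUMDL/NUMDU.
 * For the 4096-byte buffer used below, spdk_nvme_bytes_to_numd() yields
 * 4096 / 4 - 1 = 1023.
 */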
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.length = sizeof(data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &data, req.length);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.iovcnt = 0;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

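/*
 * For failed CONNECT commands, the response's iattr/ipo fields identify the
 * offending parameter: iattr 0 means ipo is a byte offset into the connect
 * command (recfmt @ 40, qid @ 42, sqsize @ 44), while iattr 1 means ipo is a
 * byte offset into the connect data (cntlid @ 16, subnqn @ 256,
 * hostnqn @ 512).  The assertions below follow those layouts.
 */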
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.length = sizeof(connect_data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* Admin connect (qid = 0) to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Admin connect (qid = 0) to discovery controller with keep-alive-timeout == 0.
	 * In this case a fixed default keep-alive timeout is applied instead.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, false);

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with qid that is too large */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect duplicate QID and schedule a retry.  So for
	 * now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now advance the clock, so that the retry poller executes. */
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with temporarily duplicate queue ID. This covers a race
	 * where the qpair_mask bit may not yet be cleared, even though the
	 * initiator has closed the connection.  See issue #2955. */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect duplicate QID and schedule a retry.  So for
	 * now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now clear the stale mask bit and advance the clock, so that the retry
	 * poller executes and the connect succeeds. */
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

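/*
 * Each entry in the Namespace Identification Descriptor list is laid out as
 * NIDT (1 byte), NIDL (1 byte), two reserved bytes, then NIDL bytes of ID:
 * EUI64 is 8 bytes, NGUID and UUID are 16 bytes each.  The buf[] offsets
 * asserted below follow directly from that layout.
 */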
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.length = sizeof(buf);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

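/*
 * Identify Namespace returns INVALID_NAMESPACE_OR_FORMAT for NSIDs outside
 * [1, max_nsid], but succeeds with zero-filled data for an allocated yet
 * inactive NSID (the NULL slot in ns_arr below).
 */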
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

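/*
 * In the ZNS Identify Namespace data, MAR and MOR are zero-based values, so
 * the mock bdev's limits are asserted as MAX_ACTIVE_ZONES - 1 and
 * MAX_OPEN_ZONES - 1.
 */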
static void
test_identify_ns_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ns_data nsdata = {};
	struct spdk_bdev bdev[2] = {
		{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE,
		 .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES},
		{.blockcnt = 5678}
	};
	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* Invalid ZNS NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid ZNS NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1);
	CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1);
	CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1);
	CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE);
	nsdata.ozcs.read_across_zone_boundaries = 0;
	nsdata.mar = 0;
	nsdata.mor = 0;
	nsdata.lbafe[0].zsze = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* Valid NVM NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NVM NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */
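
/*
 * nvmf_ns_reservation_request_check() returns 0 when a command is allowed
 * and a negative value (with SPDK_NVME_SC_RESERVATION_CONFLICT in the
 * completion) when the reservation denies it; the test cases below walk
 * that matrix for each reservation type.
 */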
1305 
1306 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
1307 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
1308 
1309 static void
1310 ut_reservation_init(enum spdk_nvme_reservation_type rtype)
1311 {
1312 	/* Host A has two controllers */
1313 	spdk_uuid_generate(&g_ctrlr1_A.hostid);
1314 	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
1315 
1316 	/* Host B has 1 controller */
1317 	spdk_uuid_generate(&g_ctrlr_B.hostid);
1318 
1319 	/* Host C has 1 controller */
1320 	spdk_uuid_generate(&g_ctrlr_C.hostid);
1321 
1322 	memset(&g_ns_info, 0, sizeof(g_ns_info));
1323 	g_ns_info.rtype = rtype;
1324 	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
1325 	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
1326 	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
1327 }
1328 
1329 static void
1330 test_reservation_write_exclusive(void)
1331 {
1332 	struct spdk_nvmf_request req = {};
1333 	union nvmf_h2c_msg cmd = {};
1334 	union nvmf_c2h_msg rsp = {};
1335 	int rc;
1336 
1337 	req.cmd = &cmd;
1338 	req.rsp = &rsp;
1339 
1340 	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
1341 	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1342 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1343 
1344 	/* Test Case: Issue a Read command from Host A and Host B */
1345 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1346 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1347 	SPDK_CU_ASSERT_FATAL(rc == 0);
1348 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1349 	SPDK_CU_ASSERT_FATAL(rc == 0);
1350 
1351 	/* Test Case: Issue a DSM Write command from Host A and Host B */
1352 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1353 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1354 	SPDK_CU_ASSERT_FATAL(rc == 0);
1355 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1356 	SPDK_CU_ASSERT_FATAL(rc < 0);
1357 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1358 
1359 	/* Test Case: Issue a Write command from Host C */
1360 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1361 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1362 	SPDK_CU_ASSERT_FATAL(rc < 0);
1363 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1364 
1365 	/* Test Case: Issue a Read command from Host B */
1366 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1367 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1368 	SPDK_CU_ASSERT_FATAL(rc == 0);
1369 
1370 	/* Unregister Host C */
1371 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1372 
1373 	/* Test Case: Read and Write commands from non-registrant Host C */
1374 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1375 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1376 	SPDK_CU_ASSERT_FATAL(rc < 0);
1377 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1378 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1379 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1380 	SPDK_CU_ASSERT_FATAL(rc == 0);
1381 }
1382 
1383 static void
1384 test_reservation_exclusive_access(void)
1385 {
1386 	struct spdk_nvmf_request req = {};
1387 	union nvmf_h2c_msg cmd = {};
1388 	union nvmf_c2h_msg rsp = {};
1389 	int rc;
1390 
1391 	req.cmd = &cmd;
1392 	req.rsp = &rsp;
1393 
1394 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1395 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1396 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1397 
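	/* With an Exclusive Access reservation only the holder may perform I/O,
	 * so even a registered host such as Host B is rejected. Reservation
	 * management commands from registrants remain allowed.
	 */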
1398 	/* Test Case: Issue a Read command from Host B */
1399 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1400 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1401 	SPDK_CU_ASSERT_FATAL(rc < 0);
1402 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1403 
1404 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1405 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1406 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1407 	SPDK_CU_ASSERT_FATAL(rc == 0);
1408 }
1409 
1410 static void
1411 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1412 {
1413 	struct spdk_nvmf_request req = {};
1414 	union nvmf_h2c_msg cmd = {};
1415 	union nvmf_c2h_msg rsp = {};
1416 	int rc;
1417 
1418 	req.cmd = &cmd;
1419 	req.rsp = &rsp;
1420 
1421 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1422 	ut_reservation_init(rtype);
1423 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1424 
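	/* Under the Registrants Only and All Registrants variants of Write
	 * Exclusive, every registrant may both read and write; only writes from
	 * non-registrants are expected to conflict.
	 */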
1425 	/* Test Case: Issue a Read command from Host A and Host C */
1426 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1427 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1428 	SPDK_CU_ASSERT_FATAL(rc == 0);
1429 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1430 	SPDK_CU_ASSERT_FATAL(rc == 0);
1431 
1432 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1433 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1434 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1435 	SPDK_CU_ASSERT_FATAL(rc == 0);
1436 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1437 	SPDK_CU_ASSERT_FATAL(rc == 0);
1438 
1439 	/* Unregister Host C */
1440 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1441 
1442 	/* Test Case: Read and Write commands from non-registrant Host C */
1443 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1444 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1445 	SPDK_CU_ASSERT_FATAL(rc == 0);
1446 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1447 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1448 	SPDK_CU_ASSERT_FATAL(rc < 0);
1449 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1450 }
1451 
1452 static void
1453 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1454 {
1455 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1456 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1457 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1458 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1459 }
1460 
1461 static void
1462 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1463 {
1464 	struct spdk_nvmf_request req = {};
1465 	union nvmf_h2c_msg cmd = {};
1466 	union nvmf_c2h_msg rsp = {};
1467 	int rc;
1468 
1469 	req.cmd = &cmd;
1470 	req.rsp = &rsp;
1471 
1472 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1473 	ut_reservation_init(rtype);
1474 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1475 
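	/* Under the Registrants Only and All Registrants variants of Exclusive
	 * Access, registrants retain full access, while a host that unregisters
	 * loses both read and write access.
	 */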
1476 	/* Test Case: Issue a Write command from Host B */
1477 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1478 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1479 	SPDK_CU_ASSERT_FATAL(rc == 0);
1480 
1481 	/* Unregister Host B */
1482 	spdk_uuid_set_null(&g_ns_info.reg_hostid[1]);
1483 
1484 	/* Test Case: Issue a Read command from Host B */
1485 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1486 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1487 	SPDK_CU_ASSERT_FATAL(rc < 0);
1488 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1489 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1490 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1491 	SPDK_CU_ASSERT_FATAL(rc < 0);
1492 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1493 }
1494 
1495 static void
1496 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1497 {
1498 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1499 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1500 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1501 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1502 }
1503 
1504 static void
1505 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1506 {
1507 	STAILQ_INIT(&ctrlr->async_events);
1508 }
1509 
1510 static void
1511 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1512 {
1513 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1514 
1515 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1516 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1517 		free(event);
1518 	}
1519 }
1520 
1521 static int
1522 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1523 {
1524 	int num = 0;
1525 	struct spdk_nvmf_async_event_completion *event;
1526 
1527 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1528 		num++;
1529 	}
1530 	return num;
1531 }
1532 
1533 static void
1534 test_reservation_notification_log_page(void)
1535 {
1536 	struct spdk_nvmf_ctrlr ctrlr;
1537 	struct spdk_nvmf_qpair qpair;
1538 	struct spdk_nvmf_ns ns;
1539 	struct spdk_nvmf_request req = {};
1540 	union nvmf_h2c_msg cmd = {};
1541 	union nvmf_c2h_msg rsp = {};
1542 	union spdk_nvme_async_event_completion event = {};
1543 	struct spdk_nvme_reservation_notification_log logs[3];
1544 	struct iovec iov;
1545 
1546 	memset(&ctrlr, 0, sizeof(ctrlr));
1547 	ctrlr.thread = spdk_get_thread();
1548 	TAILQ_INIT(&ctrlr.log_head);
1549 	init_pending_async_events(&ctrlr);
1550 	ns.nsid = 1;
1551 
1552 	/* Test Case: Mask all the reservation notifications */
1553 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1554 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1555 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1556 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1557 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1558 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1559 					  SPDK_NVME_RESERVATION_RELEASED);
1560 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1561 					  SPDK_NVME_RESERVATION_PREEMPTED);
1562 	poll_threads();
1563 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1564 
1565 	/* Test Case: Unmask all the reservation notifications;
1566 	 * three log pages are generated and an AER is triggered.
1567 	 */
1568 	ns.mask = 0;
1569 	ctrlr.num_avail_log_pages = 0;
1570 	req.cmd = &cmd;
1571 	req.rsp = &rsp;
1572 	ctrlr.aer_req[0] = &req;
1573 	ctrlr.nr_aer_reqs = 1;
1574 	req.qpair = &qpair;
1575 	TAILQ_INIT(&qpair.outstanding);
1576 	qpair.ctrlr = NULL;
1577 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1578 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1579 
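	/* A single AER request is armed, so the first unmasked notification
	 * should complete it; the completion is verified below via CDW0.
	 */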
1580 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1581 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1582 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1583 					  SPDK_NVME_RESERVATION_RELEASED);
1584 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1585 					  SPDK_NVME_RESERVATION_PREEMPTED);
1586 	poll_threads();
1587 	event.raw = rsp.nvme_cpl.cdw0;
1588 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1589 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1590 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1591 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1592 
1593 	/* Test Case: Get Log Page to clear the log pages */
1594 	iov.iov_base = &logs[0];
1595 	iov.iov_len = sizeof(logs);
1596 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1597 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1598 
1599 	cleanup_pending_async_events(&ctrlr);
1600 }
1601 
1602 static void
1603 test_get_dif_ctx(void)
1604 {
1605 	struct spdk_nvmf_subsystem subsystem = {};
1606 	struct spdk_nvmf_request req = {};
1607 	struct spdk_nvmf_qpair qpair = {};
1608 	struct spdk_nvmf_ctrlr ctrlr = {};
1609 	struct spdk_nvmf_ns ns = {};
1610 	struct spdk_nvmf_ns *_ns = NULL;
1611 	struct spdk_bdev bdev = {};
1612 	union nvmf_h2c_msg cmd = {};
1613 	struct spdk_dif_ctx dif_ctx = {};
1614 	bool ret;
1615 
1616 	ctrlr.subsys = &subsystem;
1617 
1618 	qpair.ctrlr = &ctrlr;
1619 
1620 	req.qpair = &qpair;
1621 	req.cmd = &cmd;
1622 
1623 	ns.bdev = &bdev;
1624 
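	/* Walk through each early-return condition of
	 * spdk_nvmf_request_get_dif_ctx(), satisfying one prerequisite at a time
	 * until the DIF context can finally be constructed.
	 */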
1625 	ctrlr.dif_insert_or_strip = false;
1626 
1627 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1628 	CU_ASSERT(ret == false);
1629 
1630 	ctrlr.dif_insert_or_strip = true;
1631 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1632 
1633 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1634 	CU_ASSERT(ret == false);
1635 
1636 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1637 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1638 
1639 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1640 	CU_ASSERT(ret == false);
1641 
1642 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1643 
1644 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1645 	CU_ASSERT(ret == false);
1646 
1647 	qpair.qid = 1;
1648 
1649 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1650 	CU_ASSERT(ret == false);
1651 
1652 	cmd.nvme_cmd.nsid = 1;
1653 
1654 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1655 	CU_ASSERT(ret == false);
1656 
1657 	subsystem.max_nsid = 1;
1658 	subsystem.ns = &_ns;
1659 	subsystem.ns[0] = &ns;
1660 
1661 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1662 	CU_ASSERT(ret == false);
1663 
1664 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1665 
1666 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1667 	CU_ASSERT(ret == true);
1668 }
1669 
1670 static void
1671 test_identify_ctrlr(void)
1672 {
1673 	struct spdk_nvmf_tgt tgt = {};
1674 	struct spdk_nvmf_subsystem subsystem = {
1675 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1676 		.tgt = &tgt,
1677 	};
1678 	struct spdk_nvmf_transport_ops tops = {};
1679 	struct spdk_nvmf_transport transport = {
1680 		.ops = &tops,
1681 		.opts = {
1682 			.in_capsule_data_size = 4096,
1683 		},
1684 	};
1685 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1686 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1687 	struct spdk_nvme_ctrlr_data cdata = {};
1688 	uint32_t expected_ioccsz;
1689 
1690 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1691 
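	/* ioccsz is reported in 16-byte units: the SQE itself plus whatever
	 * in-capsule data size the transport allows.
	 */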
1692 	/* Check ioccsz, TCP transport */
1693 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1694 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1695 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1696 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1697 
1698 	/* Check ioccsz, RDMA transport */
1699 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1700 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1701 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1702 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1703 
1704 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1705 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1706 	ctrlr.dif_insert_or_strip = true;
1707 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1708 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1709 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1710 }
1711 
1712 static void
1713 test_identify_ctrlr_iocs_specific(void)
1714 {
1715 	struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 };
1716 	struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 };
1717 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop };
1718 	struct spdk_nvme_cmd cmd = {};
1719 	struct spdk_nvme_cpl rsp = {};
1720 	struct spdk_nvme_zns_ctrlr_data ctrlr_data = {};
1721 	struct spdk_nvme_nvm_ctrlr_data cdata_nvm = {};
1722 
1723 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;
1724 
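	/* ZASL is a power-of-two limit expressed in units of the minimum memory
	 * page size (2^(12 + CAP.MPSMIN) bytes), so a larger MPSMIN yields a
	 * smaller ZASL value for the same byte limit.
	 */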
1725 	/* ZNS max_zone_append_size_kib no limit */
1726 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1727 	memset(&rsp, 0, sizeof(rsp));
1728 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1729 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1730 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1731 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1732 	CU_ASSERT(ctrlr_data.zasl == 0);
1733 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1734 
1735 	/* ZNS max_zone_append_size_kib = 4096 */
1736 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1737 	memset(&rsp, 0, sizeof(rsp));
1738 	subsystem.max_zone_append_size_kib = 4096;
1739 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1740 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1741 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1742 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1743 	CU_ASSERT(ctrlr_data.zasl == 0);
1744 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1745 
1746 	/* ZNS max_zone_append_size_kib = 60000 */
1747 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1748 	memset(&rsp, 0, sizeof(rsp));
1749 	subsystem.max_zone_append_size_kib = 60000;
1750 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1751 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1752 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1753 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1754 	CU_ASSERT(ctrlr_data.zasl == 3);
1755 	ctrlr_data.zasl = 0;
1756 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1757 
1758 	/* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */
1759 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1760 	memset(&rsp, 0, sizeof(rsp));
1761 	ctrlr.vcprop.cap.bits.mpsmin = 2;
1762 	subsystem.max_zone_append_size_kib = 60000;
1763 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1764 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1765 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1766 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1767 	CU_ASSERT(ctrlr_data.zasl == 1);
1768 	ctrlr_data.zasl = 0;
1769 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1770 	ctrlr.vcprop.cap.bits.mpsmin = 0;
1771 
1772 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;
1773 
1774 	/* NVM max_discard_size_kib = 1024;
1775 	 * max_write_zeroes_size_kib = 1024;
1776 	 * mpsmin = 0;
1777 	 */
1778 	memset(&cdata_nvm, 0xFF, sizeof(cdata_nvm));
1779 	memset(&rsp, 0, sizeof(rsp));
1780 	subsystem.max_discard_size_kib = (uint64_t)1024;
1781 	subsystem.max_write_zeroes_size_kib = (uint64_t)1024;
1782 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1783 			&cdata_nvm, sizeof(cdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1784 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1785 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1786 	CU_ASSERT(cdata_nvm.wzsl == 8);
1787 	CU_ASSERT(cdata_nvm.dmrsl == 2048);
1788 	CU_ASSERT(cdata_nvm.dmrl == 1);
1789 }
1790 
1791 static int
1792 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1793 {
1794 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1795 
1796 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1797 }
1798 
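/*
 * Register a handler for a vendor-specific admin opcode (0xc1) and verify
 * that nvmf_ctrlr_process_admin_cmd() dispatches to it instead of the
 * built-in handlers.
 */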
1799 static void
1800 test_custom_admin_cmd(void)
1801 {
1802 	struct spdk_nvmf_subsystem subsystem;
1803 	struct spdk_nvmf_qpair qpair;
1804 	struct spdk_nvmf_ctrlr ctrlr;
1805 	struct spdk_nvmf_request req;
1806 	struct spdk_nvmf_ns *ns_ptrs[1];
1807 	struct spdk_nvmf_ns ns;
1808 	union nvmf_h2c_msg cmd;
1809 	union nvmf_c2h_msg rsp;
1810 	struct spdk_bdev bdev;
1811 	uint8_t buf[4096];
1812 	int rc;
1813 
1814 	memset(&subsystem, 0, sizeof(subsystem));
1815 	ns_ptrs[0] = &ns;
1816 	subsystem.ns = ns_ptrs;
1817 	subsystem.max_nsid = 1;
1818 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1819 
1820 	memset(&ns, 0, sizeof(ns));
1821 	ns.opts.nsid = 1;
1822 	ns.bdev = &bdev;
1823 
1824 	memset(&qpair, 0, sizeof(qpair));
1825 	qpair.ctrlr = &ctrlr;
1826 
1827 	memset(&ctrlr, 0, sizeof(ctrlr));
1828 	ctrlr.subsys = &subsystem;
1829 	ctrlr.vcprop.cc.bits.en = 1;
1830 	ctrlr.thread = spdk_get_thread();
1831 
1832 	memset(&req, 0, sizeof(req));
1833 	req.qpair = &qpair;
1834 	req.cmd = &cmd;
1835 	req.rsp = &rsp;
1836 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1837 	req.length = sizeof(buf);
1838 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);
1839 
1840 	memset(&cmd, 0, sizeof(cmd));
1841 	cmd.nvme_cmd.opc = 0xc1;
1842 	cmd.nvme_cmd.nsid = 0;
1843 	memset(&rsp, 0, sizeof(rsp));
1844 
1845 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1846 
1847 	/* Ensure that our handler is being called */
1848 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1849 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1850 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1851 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1852 }
1853 
1854 static void
1855 test_fused_compare_and_write(void)
1856 {
1857 	struct spdk_nvmf_request req = {};
1858 	struct spdk_nvmf_qpair qpair = {};
1859 	struct spdk_nvme_cmd cmd = {};
1860 	union nvmf_c2h_msg rsp = {};
1861 	struct spdk_nvmf_ctrlr ctrlr = {};
1862 	struct spdk_nvmf_subsystem subsystem = {};
1863 	struct spdk_nvmf_ns ns = {};
1864 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1865 	enum spdk_nvme_ana_state ana_state[1];
1866 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1867 	struct spdk_bdev bdev = {};
1868 
1869 	struct spdk_nvmf_poll_group group = {};
1870 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1871 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1872 	struct spdk_io_channel io_ch = {};
1873 
1874 	ns.bdev = &bdev;
1875 	ns.anagrpid = 1;
1876 
1877 	subsystem.id = 0;
1878 	subsystem.max_nsid = 1;
1879 	subsys_ns[0] = &ns;
1880 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1881 
1882 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1883 
1884 	/* Enable controller */
1885 	ctrlr.vcprop.cc.bits.en = 1;
1886 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1887 	ctrlr.listener = &listener;
1888 
1889 	group.num_sgroups = 1;
1890 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1891 	sgroups.num_ns = 1;
1892 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1893 	ns_info.channel = &io_ch;
1894 	sgroups.ns_info = &ns_info;
1895 	TAILQ_INIT(&sgroups.queued);
1896 	group.sgroups = &sgroups;
1897 	TAILQ_INIT(&qpair.outstanding);
1898 
1899 	qpair.ctrlr = &ctrlr;
1900 	qpair.group = &group;
1901 	qpair.qid = 1;
1902 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1903 
1904 	cmd.nsid = 1;
1905 
1906 	req.qpair = &qpair;
1907 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1908 	req.rsp = &rsp;
1909 
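	/* A valid fused pair is COMPARE (FUSE_FIRST) followed by WRITE
	 * (FUSE_SECOND); the first command is parked in qpair->first_fused_req
	 * until its companion arrives.
	 */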
1910 	/* SUCCESS/SUCCESS */
1911 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1912 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1913 
1914 	spdk_nvmf_request_exec(&req);
1915 	CU_ASSERT(qpair.first_fused_req != NULL);
1916 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1917 
1918 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1919 	cmd.opc = SPDK_NVME_OPC_WRITE;
1920 
1921 	spdk_nvmf_request_exec(&req);
1922 	CU_ASSERT(qpair.first_fused_req == NULL);
1923 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1924 
1925 	/* Wrong sequence */
1926 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1927 	cmd.opc = SPDK_NVME_OPC_WRITE;
1928 
1929 	spdk_nvmf_request_exec(&req);
1930 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1931 	CU_ASSERT(qpair.first_fused_req == NULL);
1932 
1933 	/* Write as FUSE_FIRST (Wrong op code) */
1934 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1935 	cmd.opc = SPDK_NVME_OPC_WRITE;
1936 
1937 	spdk_nvmf_request_exec(&req);
1938 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1939 	CU_ASSERT(qpair.first_fused_req == NULL);
1940 
1941 	/* Compare as FUSE_SECOND (Wrong op code) */
1942 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1943 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1944 
1945 	spdk_nvmf_request_exec(&req);
1946 	CU_ASSERT(qpair.first_fused_req != NULL);
1947 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1948 
1949 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1950 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1951 
1952 	spdk_nvmf_request_exec(&req);
1953 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1954 	CU_ASSERT(qpair.first_fused_req == NULL);
1955 }
1956 
1957 static void
1958 test_multi_async_event_reqs(void)
1959 {
1960 	struct spdk_nvmf_subsystem subsystem = {};
1961 	struct spdk_nvmf_qpair qpair = {};
1962 	struct spdk_nvmf_ctrlr ctrlr = {};
1963 	struct spdk_nvmf_request req[5] = {};
1964 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1965 	struct spdk_nvmf_ns ns = {};
1966 	union nvmf_h2c_msg cmd[5] = {};
1967 	union nvmf_c2h_msg rsp[5] = {};
1968 
1969 	struct spdk_nvmf_poll_group group = {};
1970 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1971 
1972 	int i;
1973 
1974 	ns_ptrs[0] = &ns;
1975 	subsystem.ns = ns_ptrs;
1976 	subsystem.max_nsid = 1;
1977 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1978 
1979 	ns.opts.nsid = 1;
1980 	group.sgroups = &sgroups;
1981 
1982 	qpair.ctrlr = &ctrlr;
1983 	qpair.group = &group;
1984 	TAILQ_INIT(&qpair.outstanding);
1985 
1986 	ctrlr.subsys = &subsystem;
1987 	ctrlr.vcprop.cc.bits.en = 1;
1988 	ctrlr.thread = spdk_get_thread();
1989 
1990 	for (i = 0; i < 5; i++) {
1991 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1992 		cmd[i].nvme_cmd.nsid = 1;
1993 		cmd[i].nvme_cmd.cid = i;
1994 
1995 		req[i].qpair = &qpair;
1996 		req[i].cmd = &cmd[i];
1997 		req[i].rsp = &rsp[i];
1998 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1999 	}
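	/* Five AER requests are prepared so that the first
	 * SPDK_NVMF_MAX_ASYNC_EVENTS of them are accepted and the extra one is
	 * rejected below.
	 */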
2000 
2001 	/* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */
2002 	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
2003 	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
2004 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2005 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
2006 	}
2007 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2008 
2009 	/* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */
2010 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2011 	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
2012 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2013 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
2014 
2015 	/* Test that the aer_reqs array stays contiguous when a request in the middle is aborted */
2016 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
2017 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2018 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2019 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
2020 
2021 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
2022 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2023 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2024 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
2025 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
2026 
2027 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
2028 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
2029 }
2030 
2031 static void
2032 test_get_ana_log_page_one_ns_per_anagrp(void)
2033 {
2034 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
2035 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
2036 	uint32_t ana_group[3];
2037 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
2038 	struct spdk_nvmf_ctrlr ctrlr = {};
2039 	enum spdk_nvme_ana_state ana_state[3];
2040 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2041 	struct spdk_nvmf_ns ns[3];
2042 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
2043 	uint64_t offset;
2044 	uint32_t length;
2045 	int i;
2046 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2047 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2048 	struct iovec iov, iovs[2];
2049 	struct spdk_nvme_ana_page *ana_hdr;
2050 	char _ana_desc[UT_ANA_DESC_SIZE];
2051 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2052 
2053 	subsystem.ns = ns_arr;
2054 	subsystem.max_nsid = 3;
2055 	for (i = 0; i < 3; i++) {
2056 		subsystem.ana_group[i] = 1;
2057 	}
2058 	ctrlr.subsys = &subsystem;
2059 	ctrlr.listener = &listener;
2060 
2061 	for (i = 0; i < 3; i++) {
2062 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2063 	}
2064 
2065 	for (i = 0; i < 3; i++) {
2066 		ns_arr[i]->nsid = i + 1;
2067 		ns_arr[i]->anagrpid = i + 1;
2068 	}
2069 
2070 	/* create expected page */
2071 	ana_hdr = (void *)&expected_page[0];
2072 	ana_hdr->num_ana_group_desc = 3;
2073 	ana_hdr->change_count = 0;
2074 
2075 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2076 	ana_desc = (void *)_ana_desc;
2077 	offset = sizeof(struct spdk_nvme_ana_page);
2078 
2079 	for (i = 0; i < 3; i++) {
2080 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
2081 		ana_desc->ana_group_id = ns_arr[i]->nsid;
2082 		ana_desc->num_of_nsid = 1;
2083 		ana_desc->change_count = 0;
2084 		ana_desc->ana_state = ctrlr.listener->ana_state[i];
2085 		ana_desc->nsid[0] = ns_arr[i]->nsid;
2086 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
2087 		offset += UT_ANA_DESC_SIZE;
2088 	}
2089 
2090 	/* read entire actual log page */
2091 	offset = 0;
2092 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2093 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2094 		iov.iov_base = &actual_page[offset];
2095 		iov.iov_len = length;
2096 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2097 		offset += length;
2098 	}
2099 
2100 	/* compare expected page and actual page */
2101 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2102 
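	/* Read again with the page split across two iovecs, with the boundary
	 * landing in the middle of the last descriptor, to exercise the gather
	 * path.
	 */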
2103 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2104 	offset = 0;
2105 	iovs[0].iov_base = &actual_page[offset];
2106 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2107 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2108 	iovs[1].iov_base = &actual_page[offset];
2109 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
2110 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2111 
2112 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2113 
2114 #undef UT_ANA_DESC_SIZE
2115 #undef UT_ANA_LOG_PAGE_SIZE
2116 }
2117 
2118 static void
2119 test_get_ana_log_page_multi_ns_per_anagrp(void)
2120 {
2121 #define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
2122 				 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
2123 				 sizeof(uint32_t) * 5)
2124 	struct spdk_nvmf_ns ns[5];
2125 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
2126 	uint32_t ana_group[5] = {0};
2127 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
2128 	enum spdk_nvme_ana_state ana_state[5];
2129 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
2130 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
2131 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2132 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2133 	struct iovec iov, iovs[2];
2134 	struct spdk_nvme_ana_page *ana_hdr;
2135 	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
2136 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2137 	uint64_t offset;
2138 	uint32_t length;
2139 	int i;
2140 
2141 	subsystem.max_nsid = 5;
2142 	subsystem.ana_group[1] = 3;
2143 	subsystem.ana_group[2] = 2;
2144 	for (i = 0; i < 5; i++) {
2145 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2146 	}
2147 
2148 	for (i = 0; i < 5; i++) {
2149 		ns_arr[i]->nsid = i + 1;
2150 	}
2151 	ns_arr[0]->anagrpid = 2;
2152 	ns_arr[1]->anagrpid = 3;
2153 	ns_arr[2]->anagrpid = 2;
2154 	ns_arr[3]->anagrpid = 3;
2155 	ns_arr[4]->anagrpid = 2;
2156 
2157 	/* create expected page */
2158 	ana_hdr = (void *)&expected_page[0];
2159 	ana_hdr->num_ana_group_desc = 2;
2160 	ana_hdr->change_count = 0;
2161 
2162 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2163 	ana_desc = (void *)_ana_desc;
2164 	offset = sizeof(struct spdk_nvme_ana_page);
2165 
2166 	memset(_ana_desc, 0, sizeof(_ana_desc));
2167 	ana_desc->ana_group_id = 2;
2168 	ana_desc->num_of_nsid = 3;
2169 	ana_desc->change_count = 0;
2170 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2171 	ana_desc->nsid[0] = 1;
2172 	ana_desc->nsid[1] = 3;
2173 	ana_desc->nsid[2] = 5;
2174 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2175 	       sizeof(uint32_t) * 3);
2176 	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;
2177 
2178 	memset(_ana_desc, 0, sizeof(_ana_desc));
2179 	ana_desc->ana_group_id = 3;
2180 	ana_desc->num_of_nsid = 2;
2181 	ana_desc->change_count = 0;
2182 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2183 	ana_desc->nsid[0] = 2;
2184 	ana_desc->nsid[1] = 4;
2185 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2186 	       sizeof(uint32_t) * 2);
2187 
2188 	/* read entire actual log page, and compare expected page and actual page. */
2189 	offset = 0;
2190 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2191 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2192 		iov.iov_base = &actual_page[offset];
2193 		iov.iov_len = length;
2194 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2195 		offset += length;
2196 	}
2197 
2198 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2199 
2200 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2201 	offset = 0;
2202 	iovs[0].iov_base = &actual_page[offset];
2203 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2204 	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2205 	iovs[1].iov_base = &actual_page[offset];
2206 	iovs[1].iov_len = sizeof(uint32_t) * 5;
2207 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2208 
2209 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2210 
2211 #undef UT_ANA_LOG_PAGE_SIZE
2212 }

2213 static void
2214 test_multi_async_events(void)
2215 {
2216 	struct spdk_nvmf_subsystem subsystem = {};
2217 	struct spdk_nvmf_qpair qpair = {};
2218 	struct spdk_nvmf_ctrlr ctrlr = {};
2219 	struct spdk_nvmf_request req[4] = {};
2220 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2221 	struct spdk_nvmf_ns ns = {};
2222 	union nvmf_h2c_msg cmd[4] = {};
2223 	union nvmf_c2h_msg rsp[4] = {};
2224 	union spdk_nvme_async_event_completion event = {};
2225 	struct spdk_nvmf_poll_group group = {};
2226 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2227 	int i;
2228 
2229 	ns_ptrs[0] = &ns;
2230 	subsystem.ns = ns_ptrs;
2231 	subsystem.max_nsid = 1;
2232 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2233 
2234 	ns.opts.nsid = 1;
2235 	group.sgroups = &sgroups;
2236 
2237 	qpair.ctrlr = &ctrlr;
2238 	qpair.group = &group;
2239 	TAILQ_INIT(&qpair.outstanding);
2240 
2241 	ctrlr.subsys = &subsystem;
2242 	ctrlr.vcprop.cc.bits.en = 1;
2243 	ctrlr.thread = spdk_get_thread();
2244 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2245 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2246 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2247 	init_pending_async_events(&ctrlr);
2248 
2249 	/* The target queues pending events when there is no outstanding AER request */
2250 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2251 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2252 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2253 
2254 	for (i = 0; i < 4; i++) {
2255 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2256 		cmd[i].nvme_cmd.nsid = 1;
2257 		cmd[i].nvme_cmd.cid = i;
2258 
2259 		req[i].qpair = &qpair;
2260 		req[i].cmd = &cmd[i];
2261 		req[i].rsp = &rsp[i];
2262 
2263 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2264 
2265 		sgroups.mgmt_io_outstanding = 1;
2266 		if (i < 3) {
2267 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2268 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2269 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2270 		} else {
2271 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2272 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2273 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2274 		}
2275 	}
2276 
2277 	event.raw = rsp[0].nvme_cpl.cdw0;
2278 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2279 	event.raw = rsp[1].nvme_cpl.cdw0;
2280 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2281 	event.raw = rsp[2].nvme_cpl.cdw0;
2282 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2283 
2284 	cleanup_pending_async_events(&ctrlr);
2285 }
2286 
2287 static void
2288 test_rae(void)
2289 {
2290 	struct spdk_nvmf_subsystem subsystem = {};
2291 	struct spdk_nvmf_qpair qpair = {};
2292 	struct spdk_nvmf_ctrlr ctrlr = {};
2293 	struct spdk_nvmf_request req[3] = {};
2294 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2295 	struct spdk_nvmf_ns ns = {};
2296 	union nvmf_h2c_msg cmd[3] = {};
2297 	union nvmf_c2h_msg rsp[3] = {};
2298 	union spdk_nvme_async_event_completion event = {};
2299 	struct spdk_nvmf_poll_group group = {};
2300 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2301 	int i;
2302 	char data[4096];
2303 
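	/* RAE (Retain Asynchronous Event) in Get Log Page controls whether
	 * reading the log rearms the event: while RAE is set the condition stays
	 * latched and duplicate notifications are suppressed; a read with RAE
	 * cleared rearms it.
	 */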
2304 	ns_ptrs[0] = &ns;
2305 	subsystem.ns = ns_ptrs;
2306 	subsystem.max_nsid = 1;
2307 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2308 
2309 	ns.opts.nsid = 1;
2310 	group.sgroups = &sgroups;
2311 
2312 	qpair.ctrlr = &ctrlr;
2313 	qpair.group = &group;
2314 	TAILQ_INIT(&qpair.outstanding);
2315 
2316 	ctrlr.subsys = &subsystem;
2317 	ctrlr.vcprop.cc.bits.en = 1;
2318 	ctrlr.thread = spdk_get_thread();
2319 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2320 	init_pending_async_events(&ctrlr);
2321 
2322 	/* The target queues pending events when there is no outstanding AER request */
2323 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2324 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2325 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2326 	/* only one event is queued until RAE is cleared */
2327 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2328 
2329 	req[0].qpair = &qpair;
2330 	req[0].cmd = &cmd[0];
2331 	req[0].rsp = &rsp[0];
2332 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2333 	cmd[0].nvme_cmd.nsid = 1;
2334 	cmd[0].nvme_cmd.cid = 0;
2335 
2336 	for (i = 1; i < 3; i++) {
2337 		req[i].qpair = &qpair;
2338 		req[i].cmd = &cmd[i];
2339 		req[i].rsp = &rsp[i];
2340 		req[i].length = sizeof(data);
2341 		SPDK_IOV_ONE(req[i].iov, &req[i].iovcnt, &data, req[i].length);
2342 
2343 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2344 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2345 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2346 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2347 			spdk_nvme_bytes_to_numd(req[i].length);
2348 		cmd[i].nvme_cmd.cid = i;
2349 	}
2350 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2351 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
2352 
2353 	/* consume the pending event */
2354 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2355 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2356 	event.raw = rsp[0].nvme_cpl.cdw0;
2357 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2358 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2359 
2360 	/* get log with RAE set */
2361 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2362 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2363 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2364 
2365 	/* no new event is generated until RAE is cleared */
2366 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2367 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2368 
2369 	/* get log with RAE clear */
2370 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2371 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2372 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2373 
2374 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2375 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2376 
2377 	cleanup_pending_async_events(&ctrlr);
2378 }
2379 
2380 static void
2381 test_nvmf_ctrlr_create_destruct(void)
2382 {
2383 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2384 	struct spdk_nvmf_poll_group group = {};
2385 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2386 	struct spdk_nvmf_transport transport = {};
2387 	struct spdk_nvmf_transport_ops tops = {};
2388 	struct spdk_nvmf_subsystem subsystem = {};
2389 	struct spdk_nvmf_request req = {};
2390 	struct spdk_nvmf_qpair qpair = {};
2391 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2392 	struct spdk_nvmf_tgt tgt = {};
2393 	union nvmf_h2c_msg cmd = {};
2394 	union nvmf_c2h_msg rsp = {};
2395 	const uint8_t hostid[16] = {
2396 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2397 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2398 	};
2399 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2400 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2401 
2402 	group.thread = spdk_get_thread();
2403 	transport.ops = &tops;
2404 	transport.opts.max_aq_depth = 32;
2405 	transport.opts.max_queue_depth = 64;
2406 	transport.opts.max_qpairs_per_ctrlr = 3;
2407 	transport.opts.dif_insert_or_strip = true;
2408 	transport.tgt = &tgt;
2409 	qpair.transport = &transport;
2410 	qpair.group = &group;
2411 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2412 	TAILQ_INIT(&qpair.outstanding);
2413 
2414 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2415 	connect_data.cntlid = 0xFFFF;
2416 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2417 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
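	/* cntlid 0xFFFF asks the target to allocate the controller ID dynamically */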
2418 
2419 	subsystem.thread = spdk_get_thread();
2420 	subsystem.id = 1;
2421 	TAILQ_INIT(&subsystem.ctrlrs);
2422 	subsystem.tgt = &tgt;
2423 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2424 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2425 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2426 
2427 	group.sgroups = sgroups;
2428 
2429 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2430 	cmd.connect_cmd.cid = 1;
2431 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2432 	cmd.connect_cmd.recfmt = 0;
2433 	cmd.connect_cmd.qid = 0;
2434 	cmd.connect_cmd.sqsize = 31;
2435 	cmd.connect_cmd.cattr = 0;
2436 	cmd.connect_cmd.kato = 120000;
2437 
2438 	req.qpair = &qpair;
2439 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2440 	req.length = sizeof(connect_data);
2441 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
2442 	req.cmd = &cmd;
2443 	req.rsp = &rsp;
2444 
2445 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2446 	sgroups[subsystem.id].mgmt_io_outstanding++;
2447 
2448 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base);
2449 	poll_threads();
2450 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2451 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2452 	CU_ASSERT(ctrlr->subsys == &subsystem);
2453 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2454 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2455 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2456 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2457 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2458 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2459 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2460 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
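	/* hostid is a raw 16-byte UUID that may contain embedded NUL bytes, so
	 * it must be compared with memcmp() rather than a string compare.
	 */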
2461 	CU_ASSERT(!memcmp((void *)&ctrlr->hostid, hostid, 16));
2462 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2463 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2464 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2465 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
2466 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2467 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2468 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2469 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2470 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2471 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2472 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2473 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2474 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2475 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2476 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2477 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2478 
2479 	ctrlr->in_destruct = true;
2480 	nvmf_ctrlr_destruct(ctrlr);
2481 	poll_threads();
2482 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2483 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2484 }
2485 
2486 static void
2487 test_nvmf_ctrlr_use_zcopy(void)
2488 {
2489 	struct spdk_nvmf_subsystem subsystem = {};
2490 	struct spdk_nvmf_transport transport = {};
2491 	struct spdk_nvmf_request req = {};
2492 	struct spdk_nvmf_qpair qpair = {};
2493 	struct spdk_nvmf_ctrlr ctrlr = {};
2494 	union nvmf_h2c_msg cmd = {};
2495 	struct spdk_nvmf_ns ns = {};
2496 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2497 	struct spdk_bdev bdev = {};
2498 	struct spdk_nvmf_poll_group group = {};
2499 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2500 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2501 	struct spdk_io_channel io_ch = {};
2502 	int opc;
2503 
2504 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2505 	ns.bdev = &bdev;
2506 
2507 	subsystem.id = 0;
2508 	subsystem.max_nsid = 1;
2509 	subsys_ns[0] = &ns;
2510 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2511 
2512 	ctrlr.subsys = &subsystem;
2513 
2514 	transport.opts.zcopy = true;
2515 
2516 	qpair.ctrlr = &ctrlr;
2517 	qpair.group = &group;
2518 	qpair.qid = 1;
2519 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2520 	qpair.transport = &transport;
2521 
2522 	group.thread = spdk_get_thread();
2523 	group.num_sgroups = 1;
2524 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2525 	sgroups.num_ns = 1;
2526 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2527 	ns_info.channel = &io_ch;
2528 	sgroups.ns_info = &ns_info;
2529 	TAILQ_INIT(&sgroups.queued);
2530 	group.sgroups = &sgroups;
2531 	TAILQ_INIT(&qpair.outstanding);
2532 
2533 	req.qpair = &qpair;
2534 	req.cmd = &cmd;
2535 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2536 
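	/* nvmf_ctrlr_use_zcopy() must refuse zcopy for admin queues, opcodes
	 * other than READ/WRITE, fused commands, unknown namespaces, namespaces
	 * without zcopy support, and transports with zcopy disabled.
	 */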
2537 	/* Admin queue */
2538 	qpair.qid = 0;
2539 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2540 	qpair.qid = 1;
2541 
2542 	/* Invalid Opcodes */
2543 	for (opc = 0; opc <= 255; opc++) {
2544 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2545 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2546 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2547 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2548 		}
2549 	}
2550 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2551 
2552 	/* Fused WRITE */
2553 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2554 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2555 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2556 
2557 	/* Invalid NSID (no backing bdev) */
2558 	cmd.nvme_cmd.nsid = 4;
2559 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2560 	cmd.nvme_cmd.nsid = 1;
2561 
2562 	/* ZCOPY Not supported */
2563 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2564 	ns.zcopy = true;
2565 
2566 	/* ZCOPY disabled on transport level */
2567 	transport.opts.zcopy = false;
2568 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2569 	transport.opts.zcopy = true;
2570 
2571 	/* Success */
2572 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2573 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2574 }
2575 
2576 static void
2577 qpair_state_change_done(void *cb_arg, int status)
2578 {
2579 }
2580 
2581 static void
2582 test_spdk_nvmf_request_zcopy_start(void)
2583 {
2584 	struct spdk_nvmf_request req = {};
2585 	struct spdk_nvmf_qpair qpair = {};
2586 	struct spdk_nvmf_transport transport = {};
2587 	struct spdk_nvme_cmd cmd = {};
2588 	union nvmf_c2h_msg rsp = {};
2589 	struct spdk_nvmf_ctrlr ctrlr = {};
2590 	struct spdk_nvmf_subsystem subsystem = {};
2591 	struct spdk_nvmf_ns ns = {};
2592 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2593 	enum spdk_nvme_ana_state ana_state[1];
2594 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2595 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2596 
2597 	struct spdk_nvmf_poll_group group = {};
2598 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2599 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2600 	struct spdk_io_channel io_ch = {};
2601 
2602 	ns.bdev = &bdev;
2603 	ns.zcopy = true;
2604 	ns.anagrpid = 1;
2605 
2606 	subsystem.id = 0;
2607 	subsystem.max_nsid = 1;
2608 	subsys_ns[0] = &ns;
2609 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2610 
2611 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2612 
2613 	/* Enable controller */
2614 	ctrlr.vcprop.cc.bits.en = 1;
2615 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2616 	ctrlr.listener = &listener;
2617 
2618 	transport.opts.zcopy = true;
2619 
2620 	group.thread = spdk_get_thread();
2621 	group.num_sgroups = 1;
2622 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2623 	sgroups.num_ns = 1;
2624 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2625 	ns_info.channel = &io_ch;
2626 	sgroups.ns_info = &ns_info;
2627 	TAILQ_INIT(&sgroups.queued);
2628 	group.sgroups = &sgroups;
2629 	TAILQ_INIT(&qpair.outstanding);
2630 
2631 	qpair.ctrlr = &ctrlr;
2632 	qpair.group = &group;
2633 	qpair.transport = &transport;
2634 	qpair.qid = 1;
2635 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2636 
2637 	cmd.nsid = 1;
2638 
2639 	req.qpair = &qpair;
2640 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2641 	req.rsp = &rsp;
2642 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2643 	cmd.opc = SPDK_NVME_OPC_READ;
2644 
2645 	/* Fail because no controller */
2646 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2647 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2648 	qpair.ctrlr = NULL;
2649 	spdk_nvmf_request_zcopy_start(&req);
2650 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2651 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2652 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
2653 	qpair.ctrlr = &ctrlr;
2654 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2655 
2656 	/* Fail because bad NSID */
2657 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2658 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2659 	cmd.nsid = 0;
2660 	spdk_nvmf_request_zcopy_start(&req);
2661 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2662 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2663 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2664 	cmd.nsid = 1;
2665 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2666 
2667 	/* Fail because bad Channel */
2668 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2669 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2670 	ns_info.channel = NULL;
2671 	spdk_nvmf_request_zcopy_start(&req);
2672 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2673 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2674 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2675 	ns_info.channel = &io_ch;
2676 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2677 
2678 	/* Queue the request because the NSID is not active */
2679 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2680 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2681 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2682 	spdk_nvmf_request_zcopy_start(&req);
2683 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
2684 	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
2685 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2686 	TAILQ_REMOVE(&sgroups.queued, &req, link);
2687 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2688 
2689 	/* Fail because QPair is not active */
2690 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2691 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2692 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2693 	qpair.state_cb = qpair_state_change_done;
2694 	spdk_nvmf_request_zcopy_start(&req);
2695 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
2696 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2697 	qpair.state_cb = NULL;
2698 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2699 
2700 	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails */
2701 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2702 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
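	/* Set the SLBA to the first LBA past the end of the bdev so that the
	 * range check in the zcopy start fails.
	 */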
2703 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2704 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2705 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
2706 	spdk_nvmf_request_zcopy_start(&req);
2707 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2708 	cmd.cdw10 = 0;
2709 	cmd.cdw12 = 0;
2710 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2711 
2712 	/* Success */
2713 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2714 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2715 	spdk_nvmf_request_zcopy_start(&req);
2716 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2717 }
2718 
2719 static void
2720 test_zcopy_read(void)
2721 {
2722 	struct spdk_nvmf_request req = {};
2723 	struct spdk_nvmf_qpair qpair = {};
2724 	struct spdk_nvmf_transport transport = {};
2725 	struct spdk_nvme_cmd cmd = {};
2726 	union nvmf_c2h_msg rsp = {};
2727 	struct spdk_nvmf_ctrlr ctrlr = {};
2728 	struct spdk_nvmf_subsystem subsystem = {};
2729 	struct spdk_nvmf_ns ns = {};
2730 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2731 	enum spdk_nvme_ana_state ana_state[1];
2732 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2733 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2734 
2735 	struct spdk_nvmf_poll_group group = {};
2736 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2737 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2738 	struct spdk_io_channel io_ch = {};
2739 
2740 	ns.bdev = &bdev;
2741 	ns.zcopy = true;
2742 	ns.anagrpid = 1;
2743 
2744 	subsystem.id = 0;
2745 	subsystem.max_nsid = 1;
2746 	subsys_ns[0] = &ns;
2747 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2748 
2749 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2750 
2751 	/* Enable controller */
2752 	ctrlr.vcprop.cc.bits.en = 1;
2753 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2754 	ctrlr.listener = &listener;
2755 
2756 	transport.opts.zcopy = true;
2757 
2758 	group.thread = spdk_get_thread();
2759 	group.num_sgroups = 1;
2760 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2761 	sgroups.num_ns = 1;
2762 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2763 	ns_info.channel = &io_ch;
2764 	sgroups.ns_info = &ns_info;
2765 	TAILQ_INIT(&sgroups.queued);
2766 	group.sgroups = &sgroups;
2767 	TAILQ_INIT(&qpair.outstanding);
2768 
2769 	qpair.ctrlr = &ctrlr;
2770 	qpair.group = &group;
2771 	qpair.transport = &transport;
2772 	qpair.qid = 1;
2773 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2774 
2775 	cmd.nsid = 1;
2776 
2777 	req.qpair = &qpair;
2778 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2779 	req.rsp = &rsp;
2780 	cmd.opc = SPDK_NVME_OPC_READ;
2781 
2782 	/* Prepare for zcopy */
2783 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2784 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2785 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2786 	CU_ASSERT(ns_info.io_outstanding == 0);
2787 
2788 	/* Perform the zcopy start */
2789 	spdk_nvmf_request_zcopy_start(&req);
2790 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2791 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2792 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2793 	CU_ASSERT(ns_info.io_outstanding == 1);
2794 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2795 
2796 	/* Perform the zcopy end */
2797 	spdk_nvmf_request_zcopy_end(&req, false);
2798 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2799 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2800 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2801 	CU_ASSERT(ns_info.io_outstanding == 0);
2802 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2803 }
2804 
2805 static void
2806 test_zcopy_write(void)
2807 {
2808 	struct spdk_nvmf_request req = {};
2809 	struct spdk_nvmf_qpair qpair = {};
2810 	struct spdk_nvmf_transport transport = {};
2811 	struct spdk_nvme_cmd cmd = {};
2812 	union nvmf_c2h_msg rsp = {};
2813 	struct spdk_nvmf_ctrlr ctrlr = {};
2814 	struct spdk_nvmf_subsystem subsystem = {};
2815 	struct spdk_nvmf_ns ns = {};
2816 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2817 	enum spdk_nvme_ana_state ana_state[1];
2818 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2819 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2820 
2821 	struct spdk_nvmf_poll_group group = {};
2822 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2823 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2824 	struct spdk_io_channel io_ch = {};
2825 
2826 	ns.bdev = &bdev;
2827 	ns.zcopy = true;
2828 	ns.anagrpid = 1;
2829 
2830 	subsystem.id = 0;
2831 	subsystem.max_nsid = 1;
2832 	subsys_ns[0] = &ns;
2833 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2834 
2835 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2836 
2837 	/* Enable controller */
2838 	ctrlr.vcprop.cc.bits.en = 1;
2839 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2840 	ctrlr.listener = &listener;
2841 
2842 	transport.opts.zcopy = true;
2843 
2844 	group.thread = spdk_get_thread();
2845 	group.num_sgroups = 1;
2846 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2847 	sgroups.num_ns = 1;
2848 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2849 	ns_info.channel = &io_ch;
2850 	sgroups.ns_info = &ns_info;
2851 	TAILQ_INIT(&sgroups.queued);
2852 	group.sgroups = &sgroups;
2853 	TAILQ_INIT(&qpair.outstanding);
2854 
2855 	qpair.ctrlr = &ctrlr;
2856 	qpair.group = &group;
2857 	qpair.transport = &transport;
2858 	qpair.qid = 1;
2859 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2860 
2861 	cmd.nsid = 1;
2862 
2863 	req.qpair = &qpair;
2864 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2865 	req.rsp = &rsp;
2866 	cmd.opc = SPDK_NVME_OPC_WRITE;
2867 
2868 	/* Prepare for zcopy */
2869 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2870 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2871 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2872 	CU_ASSERT(ns_info.io_outstanding == 0);
2873 
2874 	/* Perform the zcopy start */
2875 	spdk_nvmf_request_zcopy_start(&req);
2876 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2877 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2878 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2879 	CU_ASSERT(ns_info.io_outstanding == 1);
2880 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2881 
2882 	/* Perform the zcopy end */
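	/* Ending with commit = true applies the written data; contrast with the
	 * read test, which ends the zcopy with commit = false.
	 */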
2883 	spdk_nvmf_request_zcopy_end(&req, true);
2884 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2885 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2886 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2887 	CU_ASSERT(ns_info.io_outstanding == 0);
2888 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2889 }
2890 
2891 static void
2892 test_nvmf_property_set(void)
2893 {
2894 	int rc;
2895 	struct spdk_nvmf_request req = {};
2896 	struct spdk_nvmf_qpair qpair = {};
2897 	struct spdk_nvmf_ctrlr ctrlr = {};
2898 	union nvmf_h2c_msg cmd = {};
2899 	union nvmf_c2h_msg rsp = {};
2900 
2901 	req.qpair = &qpair;
2902 	qpair.ctrlr = &ctrlr;
2903 	req.cmd = &cmd;
2904 	req.rsp = &rsp;
2905 
2906 	/* Invalid parameters */
2907 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2908 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
2909 
2910 	rc = nvmf_property_set(&req);
2911 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2912 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2913 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2914 
2915 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);
2916 
2917 	rc = nvmf_property_get(&req);
2918 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2919 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2920 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2921 
	/* Set cc (a 4-byte register) with a matching 4-byte property size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate cc data */
	ctrlr.vcprop.cc.raw = 0xDEADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);

	/* Set asq (an 8-byte register) with a smaller 4-byte property size; only the lower dword is accessed */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate asq data */
	ctrlr.vcprop.asq = 0xAADDADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
}

static void
test_nvmf_ctrlr_get_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Invalid data buffer: a length is set but no iovecs are supplied */
	req.length = sizeof(struct spdk_nvme_host_behavior);
	req.iovcnt = 0;

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Wrong structure length */
	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length);

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Features Host Behavior Support success: acre_enabled is copied out to the host buffer */
	req.length = sizeof(struct spdk_nvme_host_behavior);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length);

	ctrlr.acre_enabled = true;
	behavior.acre = false;

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(behavior.acre == true);
}

static void
test_nvmf_ctrlr_set_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior host_behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.iov[0].iov_base = &host_behavior;
	req.iov[0].iov_len = sizeof(host_behavior);

	/* Invalid iovcnt */
	req.iovcnt = 0;
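	/* Seed rc and the completion status with values the handler must overwrite */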
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Invalid iov_len */
	req.iovcnt = 1;
	req.iov[0].iov_len = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* acre is false */
	host_behavior.acre = 0;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == false);

	/* acre is true */
	host_behavior.acre = 1;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == true);

	/* Invalid acre */
	host_behavior.acre = 2;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}
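
/*
 * Illustrative sketch, not part of the original suite: read back the 8-byte,
 * read-only CAP register in a single full-size access. This assumes
 * nvmf_property_get() serves cap as a PROP_SIZE_8 property, mirroring the
 * cc and asq accesses exercised in test_nvmf_property_set() above.
 */
static void
test_nvmf_property_get_cap_sketch(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	req.qpair = &qpair;
	qpair.ctrlr = &ctrlr;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Emulate cap data (arbitrary value), then request a full 8-byte get */
	ctrlr.vcprop.cap.raw = 0x200000000001FFULL;
	cmd.prop_get_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_8;
	cmd.prop_get_cmd.ofst = offsetof(struct spdk_nvme_registers, cap);

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0x200000000001FFULL);
}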

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);
	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);
	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
	CU_ADD_TEST(suite, test_multi_async_events);
	CU_ADD_TEST(suite, test_rae);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
	CU_ADD_TEST(suite, test_zcopy_read);
	CU_ADD_TEST(suite, test_zcopy_write);
	CU_ADD_TEST(suite, test_nvmf_property_set);
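	CU_ADD_TEST(suite, test_nvmf_property_get_cap_sketch); /* illustrative sketch above */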
	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}