1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/bdev_zone.h"
8 #include "spdk/nvme_spec.h"
9 #include "spdk/stdinc.h"
10 
11 #include "spdk_internal/cunit.h"
12 #include "spdk_internal/mock.h"
13 #include "thread/thread_internal.h"
14 
15 #include "common/lib/ut_multithread.c"
16 #include "nvmf/ctrlr.c"
17 
18 SPDK_LOG_REGISTER_COMPONENT(nvmf)
19 
20 struct spdk_bdev {
21 	int ut_mock;
22 	uint64_t blockcnt;
23 	uint32_t blocklen;
24 	bool zoned;
25 	uint32_t zone_size;
26 	uint32_t max_open_zones;
27 	uint32_t max_active_zones;
28 };
29 
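/* Arbitrary zoned-namespace geometry values reported by the spdk_bdev_get_*()
 * stubs below and checked in test_identify_ns_iocs_specific().
 */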
30 #define MAX_OPEN_ZONES 12
31 #define MAX_ACTIVE_ZONES 34
32 #define ZONE_SIZE 56
33 
34 const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
35 const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";
36 
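/* Sentinel bdev_io pointer values (never dereferenced) used to record which
 * path nvmf_bdev_ctrlr_zcopy_start() below took for a given request.
 */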
37 static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
38 static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
39 		0x8877665544332211UL;
40 static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;
41 
42 DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
43 	    struct spdk_nvmf_subsystem *,
44 	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
45 	    NULL);
46 
47 DEFINE_STUB(spdk_nvmf_poll_group_create,
48 	    struct spdk_nvmf_poll_group *,
49 	    (struct spdk_nvmf_tgt *tgt),
50 	    NULL);
51 
52 DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
53 	    const char *,
54 	    (const struct spdk_nvmf_subsystem *subsystem),
55 	    subsystem_default_sn);
56 
57 DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
58 	    const char *,
59 	    (const struct spdk_nvmf_subsystem *subsystem),
60 	    subsystem_default_mn);
61 
62 DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
63 	    bool,
64 	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
65 	    true);
66 
67 DEFINE_STUB(nvmf_subsystem_add_ctrlr,
68 	    int,
69 	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
70 	    0);
71 
72 DEFINE_STUB(nvmf_subsystem_get_ctrlr,
73 	    struct spdk_nvmf_ctrlr *,
74 	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
75 	    NULL);
76 
77 DEFINE_STUB(nvmf_ctrlr_dsm_supported,
78 	    bool,
79 	    (struct spdk_nvmf_ctrlr *ctrlr),
80 	    false);
81 
82 DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
83 	    bool,
84 	    (struct spdk_nvmf_ctrlr *ctrlr),
85 	    false);
86 
87 DEFINE_STUB(nvmf_ctrlr_copy_supported,
88 	    bool,
89 	    (struct spdk_nvmf_ctrlr *ctrlr),
90 	    false);
91 
92 DEFINE_STUB_V(nvmf_get_discovery_log_page,
93 	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
94 	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));
95 
96 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
97 	    int,
98 	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
99 	    0);
100 
101 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
102 	    bool,
103 	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
104 	    true);
105 
106 DEFINE_STUB(nvmf_subsystem_find_listener,
107 	    struct spdk_nvmf_subsystem_listener *,
108 	    (struct spdk_nvmf_subsystem *subsystem,
109 	     const struct spdk_nvme_transport_id *trid),
110 	    (void *)0x1);
111 
112 DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
113 	    int,
114 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
115 	     struct spdk_nvmf_request *req),
116 	    0);
117 
118 DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
119 	    int,
120 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
121 	     struct spdk_nvmf_request *req),
122 	    0);
123 
124 DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
125 	    int,
126 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
127 	     struct spdk_nvmf_request *req),
128 	    0);
129 
130 DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
131 	    int,
132 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
133 	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
134 	    0);
135 
136 DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
137 	    int,
138 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
139 	     struct spdk_nvmf_request *req),
140 	    0);
141 
142 DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
143 	    int,
144 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
145 	     struct spdk_nvmf_request *req),
146 	    0);
147 
148 DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
149 	    int,
150 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
151 	     struct spdk_nvmf_request *req),
152 	    0);
153 
154 DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
155 	    int,
156 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
157 	     struct spdk_nvmf_request *req),
158 	    0);
159 
160 DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
161 	    int,
162 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
163 	     struct spdk_nvmf_request *req),
164 	    0);
165 
166 DEFINE_STUB(nvmf_transport_req_complete,
167 	    int,
168 	    (struct spdk_nvmf_request *req),
169 	    0);
170 
171 DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
172 
173 DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
174 	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
175 	     struct spdk_dif_ctx *dif_ctx),
176 	    true);
177 
178 DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
179 	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
180 
181 DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
182 DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
183 
184 DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
185 		struct spdk_nvmf_ctrlr *ctrlr));
186 
187 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
188 	    int,
189 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
190 	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
191 	    0);
192 
193 DEFINE_STUB(nvmf_transport_req_free,
194 	    int,
195 	    (struct spdk_nvmf_request *req),
196 	    0);
197 
198 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
199 	    int,
200 	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
201 	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
202 	    0);
203 DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
204 				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
205 DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
206 
207 DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
208 	    MAX_ACTIVE_ZONES);
209 DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
210 DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
211 DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
212 
213 DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
214 	    (const struct spdk_nvme_ns_data *nsdata), 0);
215 
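/* No-op stand-in; these tests never exercise a real qpair teardown. */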
216 int
217 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
218 {
219 	return 0;
220 }
221 
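/* Minimal replacement for the real nvmf_bdev_ctrlr_identify_ns(): report the
 * mock bdev's block count and a single 512-byte LBA format.
 */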
222 void
223 nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
224 			    bool dif_insert_or_strip)
225 {
226 	uint64_t num_blocks;
227 
228 	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
229 	num_blocks = ns->bdev->blockcnt;
230 	nsdata->nsze = num_blocks;
231 	nsdata->ncap = num_blocks;
232 	nsdata->nuse = num_blocks;
233 	nsdata->nlbaf = 0;
234 	nsdata->flbas.format = 0;
235 	nsdata->flbas.msb_format = 0;
236 	nsdata->lbaf[0].lbads = spdk_u32log2(512);
237 }
238 
239 struct spdk_nvmf_ns *
240 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
241 {
242 	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
243 	return subsystem->ns[0];
244 }
245 
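/* Walk the subsystem's (possibly sparse) namespace array and return the next
 * populated entry after prev_ns, or NULL when the end is reached.
 */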
246 struct spdk_nvmf_ns *
247 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
248 				struct spdk_nvmf_ns *prev_ns)
249 {
250 	uint32_t nsid;
251 
252 	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
253 	nsid = prev_ns->nsid;
254 
255 	if (nsid >= subsystem->max_nsid) {
256 		return NULL;
257 	}
258 	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
259 		if (subsystem->ns[nsid - 1]) {
260 			return subsystem->ns[nsid - 1];
261 		}
262 	}
263 	return NULL;
264 }
265 
266 bool
267 nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
268 {
269 	return true;
270 }
271 
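/* Stand-in zcopy start: decode SLBA/NLB from the command, range-check against
 * the mock bdev, and tag the request with one of the sentinel bdev_io pointers
 * so the tests can tell which opcode was handled.
 */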
272 int
273 nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
274 			    struct spdk_bdev_desc *desc,
275 			    struct spdk_io_channel *ch,
276 			    struct spdk_nvmf_request *req)
277 {
278 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
279 	uint64_t start_lba;
280 	uint64_t num_blocks;
281 
282 	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
283 	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;
284 
285 	if ((start_lba + num_blocks) > bdev->blockcnt) {
286 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
287 		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
288 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
289 	}
290 
291 	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
292 		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
293 	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
294 		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
295 	} else {
296 		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
297 	}
298 
299 
300 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
301 }
302 
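/* Stand-in zcopy end: drop the sentinel bdev_io and complete the request. */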
303 void
304 nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
305 {
306 	req->zcopy_bdev_io = NULL;
307 	spdk_nvmf_request_complete(req);
308 }
309 
310 static void
311 test_get_log_page(void)
312 {
313 	struct spdk_nvmf_subsystem subsystem = {};
314 	struct spdk_nvmf_request req = {};
315 	struct spdk_nvmf_qpair qpair = {};
316 	struct spdk_nvmf_ctrlr ctrlr = {};
317 	union nvmf_h2c_msg cmd = {};
318 	union nvmf_c2h_msg rsp = {};
319 	char data[4096];
320 
321 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
322 
323 	ctrlr.subsys = &subsystem;
324 
325 	qpair.ctrlr = &ctrlr;
326 
327 	req.qpair = &qpair;
328 	req.cmd = &cmd;
329 	req.rsp = &rsp;
330 	req.length = sizeof(data);
331 	spdk_iov_one(req.iov, &req.iovcnt, &data, req.length);
332 
333 	/* Get Log Page - all valid */
334 	memset(&cmd, 0, sizeof(cmd));
335 	memset(&rsp, 0, sizeof(rsp));
336 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
337 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
338 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
339 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
340 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
341 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
342 
343 	/* Get Log Page with invalid log ID */
344 	memset(&cmd, 0, sizeof(cmd));
345 	memset(&rsp, 0, sizeof(rsp));
346 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
347 	cmd.nvme_cmd.cdw10 = 0;
348 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
349 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
350 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
351 
352 	/* Get Log Page with invalid offset (not dword aligned) */
353 	memset(&cmd, 0, sizeof(cmd));
354 	memset(&rsp, 0, sizeof(rsp));
355 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
356 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
357 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
358 	cmd.nvme_cmd.cdw12 = 2;
359 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
360 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
361 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
362 
363 	/* Get Log Page without data buffer */
364 	memset(&cmd, 0, sizeof(cmd));
365 	memset(&rsp, 0, sizeof(rsp));
366 	req.iovcnt = 0;
367 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
368 	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
369 	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
370 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
371 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
372 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
373 }
374 
375 static void
376 test_process_fabrics_cmd(void)
377 {
378 	struct	spdk_nvmf_request req = {};
379 	int	ret;
380 	struct	spdk_nvmf_qpair req_qpair = {};
381 	union	nvmf_h2c_msg  req_cmd = {};
382 	union	nvmf_c2h_msg   req_rsp = {};
383 
384 	req.qpair = &req_qpair;
385 	req.cmd  = &req_cmd;
386 	req.rsp  = &req_rsp;
387 	req.qpair->ctrlr = NULL;
388 
389 	/* Property Get without a connected ctrlr must fail with a command sequence error */
390 	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
391 	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
392 	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
393 	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
394 }
395 
396 static bool
397 nvme_status_success(const struct spdk_nvme_status *status)
398 {
399 	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
400 }
401 
402 static void
403 test_connect(void)
404 {
405 	struct spdk_nvmf_fabric_connect_data connect_data;
406 	struct spdk_nvmf_poll_group group;
407 	struct spdk_nvmf_subsystem_poll_group *sgroups;
408 	struct spdk_nvmf_transport transport;
409 	struct spdk_nvmf_transport_ops tops = {};
410 	struct spdk_nvmf_subsystem subsystem;
411 	struct spdk_nvmf_request req;
412 	struct spdk_nvmf_qpair admin_qpair;
413 	struct spdk_nvmf_qpair qpair;
414 	struct spdk_nvmf_ctrlr ctrlr;
415 	struct spdk_nvmf_tgt tgt;
416 	union nvmf_h2c_msg cmd;
417 	union nvmf_c2h_msg rsp;
418 	const uint8_t hostid[16] = {
419 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
420 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
421 	};
422 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
423 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
424 	int rc;
425 
426 	memset(&group, 0, sizeof(group));
427 	group.thread = spdk_get_thread();
428 
429 	memset(&ctrlr, 0, sizeof(ctrlr));
430 	ctrlr.subsys = &subsystem;
431 	ctrlr.qpair_mask = spdk_bit_array_create(3);
432 	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
433 	ctrlr.vcprop.cc.bits.en = 1;
434 	ctrlr.vcprop.cc.bits.iosqes = 6;
435 	ctrlr.vcprop.cc.bits.iocqes = 4;
436 
437 	memset(&admin_qpair, 0, sizeof(admin_qpair));
438 	admin_qpair.group = &group;
439 	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
440 
441 	memset(&tgt, 0, sizeof(tgt));
442 	memset(&transport, 0, sizeof(transport));
443 	transport.ops = &tops;
444 	transport.opts.max_aq_depth = 32;
445 	transport.opts.max_queue_depth = 64;
446 	transport.opts.max_qpairs_per_ctrlr = 3;
447 	transport.tgt = &tgt;
448 
449 	memset(&qpair, 0, sizeof(qpair));
450 	qpair.transport = &transport;
451 	qpair.group = &group;
452 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
453 	TAILQ_INIT(&qpair.outstanding);
454 
455 	memset(&connect_data, 0, sizeof(connect_data));
456 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
457 	connect_data.cntlid = 0xFFFF;
458 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
459 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
460 
461 	memset(&subsystem, 0, sizeof(subsystem));
462 	subsystem.thread = spdk_get_thread();
463 	subsystem.id = 1;
464 	TAILQ_INIT(&subsystem.ctrlrs);
465 	subsystem.tgt = &tgt;
466 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
467 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
468 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
469 
470 	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
471 	group.sgroups = sgroups;
472 
473 	memset(&cmd, 0, sizeof(cmd));
474 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
475 	cmd.connect_cmd.cid = 1;
476 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
477 	cmd.connect_cmd.recfmt = 0;
478 	cmd.connect_cmd.qid = 0;
479 	cmd.connect_cmd.sqsize = 31;
480 	cmd.connect_cmd.cattr = 0;
481 	cmd.connect_cmd.kato = 120000;
482 
483 	memset(&req, 0, sizeof(req));
484 	req.qpair = &qpair;
485 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
486 	req.length = sizeof(connect_data);
487 	spdk_iov_one(req.iov, &req.iovcnt, &connect_data, req.length);
488 	req.cmd = &cmd;
489 	req.rsp = &rsp;
490 
491 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
492 	MOCK_SET(spdk_nvmf_poll_group_create, &group);
493 
494 	/* Valid admin connect command */
495 	memset(&rsp, 0, sizeof(rsp));
496 	sgroups[subsystem.id].mgmt_io_outstanding++;
497 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
498 	rc = nvmf_ctrlr_cmd_connect(&req);
499 	poll_threads();
500 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
501 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
502 	CU_ASSERT(qpair.ctrlr != NULL);
503 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
504 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
505 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
506 	free(qpair.ctrlr);
507 	qpair.ctrlr = NULL;
508 
509 	/* Valid admin connect command with kato = 0 */
510 	cmd.connect_cmd.kato = 0;
511 	memset(&rsp, 0, sizeof(rsp));
512 	sgroups[subsystem.id].mgmt_io_outstanding++;
513 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
514 	rc = nvmf_ctrlr_cmd_connect(&req);
515 	poll_threads();
516 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
517 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
518 	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
519 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
520 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
521 	free(qpair.ctrlr);
522 	qpair.ctrlr = NULL;
523 	cmd.connect_cmd.kato = 120000;
524 
525 	/* Invalid data length */
526 	memset(&rsp, 0, sizeof(rsp));
527 	req.length = sizeof(connect_data) - 1;
528 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
529 	rc = nvmf_ctrlr_cmd_connect(&req);
530 	poll_threads();
531 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
532 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
533 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
534 	CU_ASSERT(qpair.ctrlr == NULL);
535 	req.length = sizeof(connect_data);
536 
537 	/* Invalid recfmt */
538 	memset(&rsp, 0, sizeof(rsp));
539 	cmd.connect_cmd.recfmt = 1234;
540 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
541 	rc = nvmf_ctrlr_cmd_connect(&req);
542 	poll_threads();
543 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
544 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
545 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
546 	CU_ASSERT(qpair.ctrlr == NULL);
547 	cmd.connect_cmd.recfmt = 0;
548 
549 	/* Subsystem not found */
550 	memset(&rsp, 0, sizeof(rsp));
551 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
552 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
553 	rc = nvmf_ctrlr_cmd_connect(&req);
554 	poll_threads();
555 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
556 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
557 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
558 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
559 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
560 	CU_ASSERT(qpair.ctrlr == NULL);
561 	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
562 
563 	/* Unterminated hostnqn */
564 	memset(&rsp, 0, sizeof(rsp));
565 	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
566 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
567 	rc = nvmf_ctrlr_cmd_connect(&req);
568 	poll_threads();
569 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
570 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
571 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
572 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
573 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
574 	CU_ASSERT(qpair.ctrlr == NULL);
575 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
576 
577 	/* Host not allowed */
578 	memset(&rsp, 0, sizeof(rsp));
579 	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
580 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
581 	rc = nvmf_ctrlr_cmd_connect(&req);
582 	poll_threads();
583 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
584 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
585 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
586 	CU_ASSERT(qpair.ctrlr == NULL);
587 	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);
588 
589 	/* Invalid sqsize == 0 */
590 	memset(&rsp, 0, sizeof(rsp));
591 	cmd.connect_cmd.sqsize = 0;
592 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
593 	rc = nvmf_ctrlr_cmd_connect(&req);
594 	poll_threads();
595 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
596 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
597 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
598 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
599 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
600 	CU_ASSERT(qpair.ctrlr == NULL);
601 	cmd.connect_cmd.sqsize = 31;
602 
603 	/* Invalid admin sqsize > max_aq_depth */
604 	memset(&rsp, 0, sizeof(rsp));
605 	cmd.connect_cmd.sqsize = 32;
606 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
607 	rc = nvmf_ctrlr_cmd_connect(&req);
608 	poll_threads();
609 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
610 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
611 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
612 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
613 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
614 	CU_ASSERT(qpair.ctrlr == NULL);
615 	cmd.connect_cmd.sqsize = 31;
616 
617 	/* Invalid I/O sqsize > max_queue_depth */
618 	memset(&rsp, 0, sizeof(rsp));
619 	cmd.connect_cmd.qid = 1;
620 	cmd.connect_cmd.sqsize = 64;
621 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
622 	rc = nvmf_ctrlr_cmd_connect(&req);
623 	poll_threads();
624 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
625 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
626 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
627 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
628 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
629 	CU_ASSERT(qpair.ctrlr == NULL);
630 	cmd.connect_cmd.qid = 0;
631 	cmd.connect_cmd.sqsize = 31;
632 
633 	/* Invalid cntlid for admin queue */
634 	memset(&rsp, 0, sizeof(rsp));
635 	connect_data.cntlid = 0x1234;
636 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
637 	rc = nvmf_ctrlr_cmd_connect(&req);
638 	poll_threads();
639 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
640 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
641 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
642 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
643 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
644 	CU_ASSERT(qpair.ctrlr == NULL);
645 	connect_data.cntlid = 0xFFFF;
646 
647 	ctrlr.admin_qpair = &admin_qpair;
648 	ctrlr.subsys = &subsystem;
649 
650 	/* Valid I/O queue connect command */
651 	memset(&rsp, 0, sizeof(rsp));
652 	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
653 	cmd.connect_cmd.qid = 1;
654 	cmd.connect_cmd.sqsize = 63;
655 	sgroups[subsystem.id].mgmt_io_outstanding++;
656 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
657 	rc = nvmf_ctrlr_cmd_connect(&req);
658 	poll_threads();
659 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
660 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
661 	CU_ASSERT(qpair.ctrlr == &ctrlr);
662 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
663 	qpair.ctrlr = NULL;
664 	cmd.connect_cmd.sqsize = 31;
665 
666 	/* Non-existent controller */
667 	memset(&rsp, 0, sizeof(rsp));
668 	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
669 	sgroups[subsystem.id].mgmt_io_outstanding++;
670 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
671 	rc = nvmf_ctrlr_cmd_connect(&req);
672 	poll_threads();
673 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
674 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
675 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
676 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
677 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
678 	CU_ASSERT(qpair.ctrlr == NULL);
679 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
680 	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
681 
682 	/* I/O connect to discovery controller */
683 	memset(&rsp, 0, sizeof(rsp));
684 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
685 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
686 	sgroups[subsystem.id].mgmt_io_outstanding++;
687 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
688 	rc = nvmf_ctrlr_cmd_connect(&req);
689 	poll_threads();
690 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
691 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
692 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
693 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
694 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
695 	CU_ASSERT(qpair.ctrlr == NULL);
696 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
697 
698 	/* Connect (admin queue) to discovery controller with keep-alive-timeout != 0 */
699 	cmd.connect_cmd.qid = 0;
700 	cmd.connect_cmd.kato = 120000;
701 	memset(&rsp, 0, sizeof(rsp));
702 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
703 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
704 	sgroups[subsystem.id].mgmt_io_outstanding++;
705 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
706 	rc = nvmf_ctrlr_cmd_connect(&req);
707 	poll_threads();
708 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
709 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
710 	CU_ASSERT(qpair.ctrlr != NULL);
711 	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
712 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
713 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
714 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
715 	free(qpair.ctrlr);
716 	qpair.ctrlr = NULL;
717 
718 	/* Connect (admin queue) to discovery controller with keep-alive-timeout == 0.
719 	 *  In this case a default, fixed keep-alive timeout is applied.
720 	 */
721 	cmd.connect_cmd.kato = 0;
722 	memset(&rsp, 0, sizeof(rsp));
723 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
724 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
725 	sgroups[subsystem.id].mgmt_io_outstanding++;
726 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
727 	rc = nvmf_ctrlr_cmd_connect(&req);
728 	poll_threads();
729 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
730 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
731 	CU_ASSERT(qpair.ctrlr != NULL);
732 	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
733 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
734 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
735 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
736 	free(qpair.ctrlr);
737 	qpair.ctrlr = NULL;
738 	cmd.connect_cmd.qid = 1;
739 	cmd.connect_cmd.kato = 120000;
740 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
741 
742 	/* I/O connect to disabled controller */
743 	memset(&rsp, 0, sizeof(rsp));
744 	ctrlr.vcprop.cc.bits.en = 0;
745 	sgroups[subsystem.id].mgmt_io_outstanding++;
746 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
747 	rc = nvmf_ctrlr_cmd_connect(&req);
748 	poll_threads();
749 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
750 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
751 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
752 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
753 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
754 	CU_ASSERT(qpair.ctrlr == NULL);
755 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
756 	ctrlr.vcprop.cc.bits.en = 1;
757 
758 	/* I/O connect with invalid IOSQES */
759 	memset(&rsp, 0, sizeof(rsp));
760 	ctrlr.vcprop.cc.bits.iosqes = 3;
761 	sgroups[subsystem.id].mgmt_io_outstanding++;
762 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
763 	rc = nvmf_ctrlr_cmd_connect(&req);
764 	poll_threads();
765 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
766 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
767 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
768 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
769 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
770 	CU_ASSERT(qpair.ctrlr == NULL);
771 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
772 	ctrlr.vcprop.cc.bits.iosqes = 6;
773 
774 	/* I/O connect with invalid IOCQES */
775 	memset(&rsp, 0, sizeof(rsp));
776 	ctrlr.vcprop.cc.bits.iocqes = 3;
777 	sgroups[subsystem.id].mgmt_io_outstanding++;
778 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
779 	rc = nvmf_ctrlr_cmd_connect(&req);
780 	poll_threads();
781 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
782 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
783 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
784 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
785 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
786 	CU_ASSERT(qpair.ctrlr == NULL);
787 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
788 	ctrlr.vcprop.cc.bits.iocqes = 4;
789 
790 	/* I/O connect with qid that is too large */
791 	memset(&rsp, 0, sizeof(rsp));
792 	cmd.connect_cmd.qid = 3;
793 	sgroups[subsystem.id].mgmt_io_outstanding++;
794 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
795 	rc = nvmf_ctrlr_cmd_connect(&req);
796 	poll_threads();
797 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
798 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
799 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
800 	CU_ASSERT(qpair.ctrlr == NULL);
801 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
802 
803 	/* I/O connect with duplicate queue ID */
804 	memset(&rsp, 0, sizeof(rsp));
805 	spdk_bit_array_set(ctrlr.qpair_mask, 1);
806 	cmd.connect_cmd.qid = 1;
807 	sgroups[subsystem.id].mgmt_io_outstanding++;
808 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
809 	rc = nvmf_ctrlr_cmd_connect(&req);
810 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
811 	poll_threads();
812 	/* First time, it will detect duplicate QID and schedule a retry.  So for
813 	 * now we should expect the response to still be all zeroes.
814 	 */
815 	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
816 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);
817 
818 	/* Now advance the clock, so that the retry poller executes. */
819 	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
820 	poll_threads();
821 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
822 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
823 	CU_ASSERT(qpair.ctrlr == NULL);
824 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
825 
826 	/* I/O connect with a temporarily duplicate queue ID. This covers a race
827 	 * where the qpair_mask bit may not yet be cleared, even though the initiator
828 	 * has closed the connection.  See issue #2955. */
829 	memset(&rsp, 0, sizeof(rsp));
830 	sgroups[subsystem.id].mgmt_io_outstanding++;
831 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
832 	rc = nvmf_ctrlr_cmd_connect(&req);
833 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
834 	poll_threads();
835 	/* First time, it will detect duplicate QID and schedule a retry.  So for
836 	 * now we should expect the response to still be all zeroes.
837 	 */
838 	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
839 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);
840 
841 	/* Clear the stale qpair_mask bit, then advance the clock so that the retry poller executes and succeeds. */
842 	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
843 	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
844 	poll_threads();
845 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
846 	CU_ASSERT(qpair.ctrlr == &ctrlr);
847 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
848 	qpair.ctrlr = NULL;
849 
850 	/* I/O connect when admin qpair is being destroyed */
851 	admin_qpair.group = NULL;
852 	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
853 	memset(&rsp, 0, sizeof(rsp));
854 	sgroups[subsystem.id].mgmt_io_outstanding++;
855 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
856 	rc = nvmf_ctrlr_cmd_connect(&req);
857 	poll_threads();
858 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
859 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
860 	CU_ASSERT(qpair.ctrlr == NULL);
861 	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
862 	admin_qpair.group = &group;
863 	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
864 
865 	/* Clean up globals */
866 	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
867 	MOCK_CLEAR(spdk_nvmf_poll_group_create);
868 
869 	spdk_bit_array_free(&ctrlr.qpair_mask);
870 	free(sgroups);
871 }
872 
873 static void
874 test_get_ns_id_desc_list(void)
875 {
876 	struct spdk_nvmf_subsystem subsystem;
877 	struct spdk_nvmf_qpair qpair;
878 	struct spdk_nvmf_ctrlr ctrlr;
879 	struct spdk_nvmf_request req;
880 	struct spdk_nvmf_ns *ns_ptrs[1];
881 	struct spdk_nvmf_ns ns;
882 	union nvmf_h2c_msg cmd;
883 	union nvmf_c2h_msg rsp;
884 	struct spdk_bdev bdev;
885 	uint8_t buf[4096];
886 
887 	memset(&subsystem, 0, sizeof(subsystem));
888 	ns_ptrs[0] = &ns;
889 	subsystem.ns = ns_ptrs;
890 	subsystem.max_nsid = 1;
891 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
892 
893 	memset(&ns, 0, sizeof(ns));
894 	ns.opts.nsid = 1;
895 	ns.bdev = &bdev;
896 
897 	memset(&qpair, 0, sizeof(qpair));
898 	qpair.ctrlr = &ctrlr;
899 
900 	memset(&ctrlr, 0, sizeof(ctrlr));
901 	ctrlr.subsys = &subsystem;
902 	ctrlr.vcprop.cc.bits.en = 1;
903 	ctrlr.thread = spdk_get_thread();
904 
905 	memset(&req, 0, sizeof(req));
906 	req.qpair = &qpair;
907 	req.cmd = &cmd;
908 	req.rsp = &rsp;
909 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
910 	req.length = sizeof(buf);
911 	spdk_iov_one(req.iov, &req.iovcnt, &buf, req.length);
912 
913 	memset(&cmd, 0, sizeof(cmd));
914 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
915 	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
916 
917 	/* Invalid NSID */
918 	cmd.nvme_cmd.nsid = 0;
919 	memset(&rsp, 0, sizeof(rsp));
920 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
921 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
922 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
923 
924 	/* Valid NSID, but ns has no IDs defined */
925 	cmd.nvme_cmd.nsid = 1;
926 	memset(&rsp, 0, sizeof(rsp));
927 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
928 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
929 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
930 	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));
931 
932 	/* Valid NSID, only EUI64 defined */
933 	ns.opts.eui64[0] = 0x11;
934 	ns.opts.eui64[7] = 0xFF;
935 	memset(&rsp, 0, sizeof(rsp));
936 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
937 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
938 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
939 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
940 	CU_ASSERT(buf[1] == 8);
941 	CU_ASSERT(buf[4] == 0x11);
942 	CU_ASSERT(buf[11] == 0xFF);
943 	CU_ASSERT(buf[13] == 0);
944 
945 	/* Valid NSID, only NGUID defined */
946 	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
947 	ns.opts.nguid[0] = 0x22;
948 	ns.opts.nguid[15] = 0xEE;
949 	memset(&rsp, 0, sizeof(rsp));
950 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
951 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
952 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
953 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
954 	CU_ASSERT(buf[1] == 16);
955 	CU_ASSERT(buf[4] == 0x22);
956 	CU_ASSERT(buf[19] == 0xEE);
957 	CU_ASSERT(buf[21] == 0);
958 
959 	/* Valid NSID, both EUI64 and NGUID defined */
960 	ns.opts.eui64[0] = 0x11;
961 	ns.opts.eui64[7] = 0xFF;
962 	ns.opts.nguid[0] = 0x22;
963 	ns.opts.nguid[15] = 0xEE;
964 	memset(&rsp, 0, sizeof(rsp));
965 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
966 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
967 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
968 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
969 	CU_ASSERT(buf[1] == 8);
970 	CU_ASSERT(buf[4] == 0x11);
971 	CU_ASSERT(buf[11] == 0xFF);
972 	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
973 	CU_ASSERT(buf[13] == 16);
974 	CU_ASSERT(buf[16] == 0x22);
975 	CU_ASSERT(buf[31] == 0xEE);
976 	CU_ASSERT(buf[33] == 0);
977 
978 	/* Valid NSID, EUI64, NGUID, and UUID defined */
979 	ns.opts.eui64[0] = 0x11;
980 	ns.opts.eui64[7] = 0xFF;
981 	ns.opts.nguid[0] = 0x22;
982 	ns.opts.nguid[15] = 0xEE;
983 	ns.opts.uuid.u.raw[0] = 0x33;
984 	ns.opts.uuid.u.raw[15] = 0xDD;
985 	memset(&rsp, 0, sizeof(rsp));
986 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
987 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
988 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
989 	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
990 	CU_ASSERT(buf[1] == 8);
991 	CU_ASSERT(buf[4] == 0x11);
992 	CU_ASSERT(buf[11] == 0xFF);
993 	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
994 	CU_ASSERT(buf[13] == 16);
995 	CU_ASSERT(buf[16] == 0x22);
996 	CU_ASSERT(buf[31] == 0xEE);
997 	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
998 	CU_ASSERT(buf[33] == 16);
999 	CU_ASSERT(buf[36] == 0x33);
1000 	CU_ASSERT(buf[51] == 0xDD);
1001 	CU_ASSERT(buf[53] == 0);
1002 }
1003 
1004 static void
1005 test_identify_ns(void)
1006 {
1007 	struct spdk_nvmf_subsystem subsystem = {};
1008 	struct spdk_nvmf_transport transport = {};
1009 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1010 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1011 	struct spdk_nvme_cmd cmd = {};
1012 	struct spdk_nvme_cpl rsp = {};
1013 	struct spdk_nvme_ns_data nsdata = {};
1014 	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
1015 	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
1016 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
1017 
1018 	subsystem.ns = ns_arr;
1019 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
1020 
1021 	/* Invalid NSID 0 */
1022 	cmd.nsid = 0;
1023 	memset(&nsdata, 0, sizeof(nsdata));
1024 	memset(&rsp, 0, sizeof(rsp));
1025 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1026 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1027 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1028 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1029 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1030 
1031 	/* Valid NSID 1 */
1032 	cmd.nsid = 1;
1033 	memset(&nsdata, 0, sizeof(nsdata));
1034 	memset(&rsp, 0, sizeof(rsp));
1035 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1036 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1037 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1038 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1039 	CU_ASSERT(nsdata.nsze == 1234);
1040 
1041 	/* Valid but inactive NSID 2 */
1042 	cmd.nsid = 2;
1043 	memset(&nsdata, 0, sizeof(nsdata));
1044 	memset(&rsp, 0, sizeof(rsp));
1045 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1046 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1047 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1048 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1049 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1050 
1051 	/* Valid NSID 3 */
1052 	cmd.nsid = 3;
1053 	memset(&nsdata, 0, sizeof(nsdata));
1054 	memset(&rsp, 0, sizeof(rsp));
1055 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1056 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1057 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1058 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1059 	CU_ASSERT(nsdata.nsze == 5678);
1060 
1061 	/* Invalid NSID 4 */
1062 	cmd.nsid = 4;
1063 	memset(&nsdata, 0, sizeof(nsdata));
1064 	memset(&rsp, 0, sizeof(rsp));
1065 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1066 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1067 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1068 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1069 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1070 
1071 	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
1072 	cmd.nsid = 0xFFFFFFFF;
1073 	memset(&nsdata, 0, sizeof(nsdata));
1074 	memset(&rsp, 0, sizeof(rsp));
1075 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
1076 					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1077 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1078 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1079 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1080 }
1081 
1082 static void
1083 test_identify_ns_iocs_specific(void)
1084 {
1085 	struct spdk_nvmf_subsystem subsystem = {};
1086 	struct spdk_nvmf_transport transport = {};
1087 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
1088 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1089 	struct spdk_nvme_cmd cmd = {};
1090 	struct spdk_nvme_cpl rsp = {};
1091 	struct spdk_nvme_zns_ns_data nsdata = {};
1092 	struct spdk_bdev bdev[2] = {{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE, .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES}, {.blockcnt = 5678}};
1093 	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
1094 	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};
1095 
1096 	subsystem.ns = ns_arr;
1097 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
1098 
1099 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;
1100 
1101 	/* Invalid ZNS NSID 0 */
1102 	cmd.nsid = 0;
1103 	memset(&nsdata, 0xFF, sizeof(nsdata));
1104 	memset(&rsp, 0, sizeof(rsp));
1105 	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1106 			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1107 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1108 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1109 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1110 
1111 	/* Valid ZNS NSID 1 */
1112 	cmd.nsid = 1;
1113 	memset(&nsdata, 0xFF, sizeof(nsdata));
1114 	memset(&rsp, 0, sizeof(rsp));
1115 	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1116 			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1117 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1118 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1119 	CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1);
1120 	CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1);
1121 	CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1);
1122 	CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE);
1123 	nsdata.ozcs.read_across_zone_boundaries = 0;
1124 	nsdata.mar = 0;
1125 	nsdata.mor = 0;
1126 	nsdata.lbafe[0].zsze = 0;
1127 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1128 
1129 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;
1130 
1131 	/* Valid NVM NSID 2 */
1132 	cmd.nsid = 2;
1133 	memset(&nsdata, 0xFF, sizeof(nsdata));
1134 	memset(&rsp, 0, sizeof(rsp));
1135 	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1136 			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1137 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1138 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1139 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1140 
1141 	/* Invalid NVM NSID 0 */
1142 	cmd.nsid = 0;
1143 	memset(&nsdata, 0xFF, sizeof(nsdata));
1144 	memset(&rsp, 0, sizeof(rsp));
1145 	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1146 			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1147 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1148 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1149 	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
1150 }
1151 
1152 static void
1153 test_set_get_features(void)
1154 {
1155 	struct spdk_nvmf_subsystem subsystem = {};
1156 	struct spdk_nvmf_qpair admin_qpair = {};
1157 	enum spdk_nvme_ana_state ana_state[3];
1158 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1159 	struct spdk_nvmf_ctrlr ctrlr = {
1160 		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
1161 	};
1162 	union nvmf_h2c_msg cmd = {};
1163 	union nvmf_c2h_msg rsp = {};
1164 	struct spdk_nvmf_ns ns[3];
1165 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
1166 	struct spdk_nvmf_request req;
1167 	int rc;
1168 
1169 	ns[0].anagrpid = 1;
1170 	ns[2].anagrpid = 3;
1171 	subsystem.ns = ns_arr;
1172 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
1173 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1174 	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1175 	admin_qpair.ctrlr = &ctrlr;
1176 	req.qpair = &admin_qpair;
1177 	cmd.nvme_cmd.nsid = 1;
1178 	req.cmd = &cmd;
1179 	req.rsp = &rsp;
1180 
1181 	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1182 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1183 	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
1184 	ns[0].ptpl_file = "testcfg";
1185 	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
1186 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1187 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1188 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
1189 	CU_ASSERT(ns[0].ptpl_activated == true);
1190 
1191 	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1192 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1193 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
1194 	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
1195 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1196 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1197 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1198 	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
1199 
1200 
1201 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1202 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1203 	cmd.nvme_cmd.cdw11 = 0x42;
1204 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1205 
1206 	rc = nvmf_ctrlr_get_features(&req);
1207 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1208 
1209 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1210 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1211 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1212 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1213 
1214 	rc = nvmf_ctrlr_get_features(&req);
1215 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1216 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1217 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1218 
1219 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1220 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1221 	cmd.nvme_cmd.cdw11 = 0x42;
1222 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1223 
1224 	rc = nvmf_ctrlr_set_features(&req);
1225 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1226 
1227 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1228 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1229 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1230 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1231 
1232 	rc = nvmf_ctrlr_set_features(&req);
1233 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1234 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1235 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1236 
1237 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
1238 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1239 	cmd.nvme_cmd.cdw11 = 0x42;
1240 	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
1241 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1242 
1243 	rc = nvmf_ctrlr_set_features(&req);
1244 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1245 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1246 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1247 
1248 
1249 	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
1250 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1251 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1252 
1253 	rc = nvmf_ctrlr_get_features(&req);
1254 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1255 
1256 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
1257 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1258 	cmd.nvme_cmd.cdw11 = 0x42;
1259 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
1260 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1261 
1262 	rc = nvmf_ctrlr_set_features(&req);
1263 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1264 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1265 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1266 
1267 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
1268 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1269 	cmd.nvme_cmd.cdw11 = 0x42;
1270 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
1271 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1272 
1273 	rc = nvmf_ctrlr_set_features(&req);
1274 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1275 }
1276 
1277 /*
1278  * Reservation Unit Test Configuration
1279  *       --------             --------    --------
1280  *      | Host A |           | Host B |  | Host C |
1281  *       --------             --------    --------
1282  *      /        \               |           |
1283  *  --------   --------       -------     -------
1284  * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
1285  *  --------   --------       -------     -------
1286  *    \           \              /           /
1287  *     \           \            /           /
1288  *      \           \          /           /
1289  *      --------------------------------------
1290  *     |            NAMESPACE 1               |
1291  *      --------------------------------------
1292  */
1293 
1294 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
1295 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
1296 
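/* Register hosts A, B and C (per the diagram above) against namespace 1 and
 * set the reservation type under test.
 */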
1297 static void
1298 ut_reservation_init(enum spdk_nvme_reservation_type rtype)
1299 {
1300 	/* Host A has two controllers */
1301 	spdk_uuid_generate(&g_ctrlr1_A.hostid);
1302 	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
1303 
1304 	/* Host B has 1 controller */
1305 	spdk_uuid_generate(&g_ctrlr_B.hostid);
1306 
1307 	/* Host C has 1 controller */
1308 	spdk_uuid_generate(&g_ctrlr_C.hostid);
1309 
1310 	memset(&g_ns_info, 0, sizeof(g_ns_info));
1311 	g_ns_info.rtype = rtype;
1312 	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
1313 	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
1314 	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
1315 }
1316 
1317 static void
1318 test_reservation_write_exclusive(void)
1319 {
1320 	struct spdk_nvmf_request req = {};
1321 	union nvmf_h2c_msg cmd = {};
1322 	union nvmf_c2h_msg rsp = {};
1323 	int rc;
1324 
1325 	req.cmd = &cmd;
1326 	req.rsp = &rsp;
1327 
1328 	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
1329 	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1330 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1331 
1332 	/* Test Case: Issue a Read command from Host A and Host B */
1333 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1334 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1335 	SPDK_CU_ASSERT_FATAL(rc == 0);
1336 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1337 	SPDK_CU_ASSERT_FATAL(rc == 0);
1338 
1339 	/* Test Case: Issue a DSM Write command from Host A and Host B */
1340 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1341 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1342 	SPDK_CU_ASSERT_FATAL(rc == 0);
1343 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1344 	SPDK_CU_ASSERT_FATAL(rc < 0);
1345 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1346 
1347 	/* Test Case: Issue a Write command from Host C */
1348 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1349 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1350 	SPDK_CU_ASSERT_FATAL(rc < 0);
1351 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1352 
1353 	/* Test Case: Issue a Read command from Host B */
1354 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1355 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1356 	SPDK_CU_ASSERT_FATAL(rc == 0);
1357 
1358 	/* Unregister Host C */
1359 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1360 
1361 	/* Test Case: Read and Write commands from non-registrant Host C */
1362 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1363 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1364 	SPDK_CU_ASSERT_FATAL(rc < 0);
1365 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1366 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1367 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1368 	SPDK_CU_ASSERT_FATAL(rc == 0);
1369 }
1370 
1371 static void
1372 test_reservation_exclusive_access(void)
1373 {
1374 	struct spdk_nvmf_request req = {};
1375 	union nvmf_h2c_msg cmd = {};
1376 	union nvmf_c2h_msg rsp = {};
1377 	int rc;
1378 
1379 	req.cmd = &cmd;
1380 	req.rsp = &rsp;
1381 
1382 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1383 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1384 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1385 
1386 	/* Test Case: Issue a Read command from Host B */
1387 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1388 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1389 	SPDK_CU_ASSERT_FATAL(rc < 0);
1390 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1391 
1392 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1393 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1394 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1395 	SPDK_CU_ASSERT_FATAL(rc == 0);
1396 }
1397 
1398 static void
1399 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1400 {
1401 	struct spdk_nvmf_request req = {};
1402 	union nvmf_h2c_msg cmd = {};
1403 	union nvmf_c2h_msg rsp = {};
1404 	int rc;
1405 
1406 	req.cmd = &cmd;
1407 	req.rsp = &rsp;
1408 
1409 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1410 	ut_reservation_init(rtype);
1411 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1412 
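	/* For the registrants-only and all-registrants write-exclusive types, any registrant
	 * keeps both read and write access; only non-registrants are limited to reads, which
	 * the cases below verify after Host C is unregistered.
	 */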
1413 	/* Test Case: Issue a Read command from Host A and Host C */
1414 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1415 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1416 	SPDK_CU_ASSERT_FATAL(rc == 0);
1417 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1418 	SPDK_CU_ASSERT_FATAL(rc == 0);
1419 
1420 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1421 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1422 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1423 	SPDK_CU_ASSERT_FATAL(rc == 0);
1424 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1425 	SPDK_CU_ASSERT_FATAL(rc == 0);
1426 
1427 	/* Unregister Host C */
1428 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1429 
1430 	/* Test Case: Read and Write commands from non-registrant Host C */
1431 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1432 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1433 	SPDK_CU_ASSERT_FATAL(rc == 0);
1434 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1435 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1436 	SPDK_CU_ASSERT_FATAL(rc < 0);
1437 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1438 }
1439 
1440 static void
1441 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1442 {
1443 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1444 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1445 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1446 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1447 }
1448 
1449 static void
1450 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1451 {
1452 	struct spdk_nvmf_request req = {};
1453 	union nvmf_h2c_msg cmd = {};
1454 	union nvmf_c2h_msg rsp = {};
1455 	int rc;
1456 
1457 	req.cmd = &cmd;
1458 	req.rsp = &rsp;
1459 
1460 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1461 	ut_reservation_init(rtype);
1462 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1463 
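	/* For the registrants-only and all-registrants exclusive-access types, registrants keep
	 * full access; once Host B is unregistered below, both its reads and writes should be
	 * rejected with a reservation conflict.
	 */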
1464 	/* Test Case: Issue a Write command from Host B */
1465 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1466 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1467 	SPDK_CU_ASSERT_FATAL(rc == 0);
1468 
1469 	/* Unregister Host B */
1470 	spdk_uuid_set_null(&g_ns_info.reg_hostid[1]);
1471 
1472 	/* Test Case: Issue a Read command from Host B */
1473 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1474 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1475 	SPDK_CU_ASSERT_FATAL(rc < 0);
1476 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1477 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1478 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1479 	SPDK_CU_ASSERT_FATAL(rc < 0);
1480 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1481 }
1482 
1483 static void
1484 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1485 {
1486 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1487 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1488 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1489 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1490 }
1491 
1492 static void
1493 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1494 {
1495 	STAILQ_INIT(&ctrlr->async_events);
1496 }
1497 
1498 static void
1499 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1500 {
1501 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1502 
1503 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1504 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1505 		free(event);
1506 	}
1507 }
1508 
1509 static int
1510 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1511 {
1512 	int num = 0;
1513 	struct spdk_nvmf_async_event_completion *event;
1514 
1515 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1516 		num++;
1517 	}
1518 	return num;
1519 }
1520 
1521 static void
1522 test_reservation_notification_log_page(void)
1523 {
1524 	struct spdk_nvmf_ctrlr ctrlr;
1525 	struct spdk_nvmf_qpair qpair;
1526 	struct spdk_nvmf_ns ns;
1527 	struct spdk_nvmf_request req = {};
1528 	union nvmf_h2c_msg cmd = {};
1529 	union nvmf_c2h_msg rsp = {};
1530 	union spdk_nvme_async_event_completion event = {};
1531 	struct spdk_nvme_reservation_notification_log logs[3];
1532 	struct iovec iov;
1533 
1534 	memset(&ctrlr, 0, sizeof(ctrlr));
1535 	ctrlr.thread = spdk_get_thread();
1536 	TAILQ_INIT(&ctrlr.log_head);
1537 	init_pending_async_events(&ctrlr);
1538 	ns.nsid = 1;
1539 
1540 	/* Test Case: Mask all the reservation notifications */
1541 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1542 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1543 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1544 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1545 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1546 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1547 					  SPDK_NVME_RESERVATION_RELEASED);
1548 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1549 					  SPDK_NVME_RESERVATION_PREEMPTED);
1550 	poll_threads();
1551 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1552 
1553 	/* Test Case: Unmask all the reservation notifications;
1554 	 * 3 log pages are generated and an AER is triggered.
1555 	 */
1556 	ns.mask = 0;
1557 	ctrlr.num_avail_log_pages = 0;
1558 	req.cmd = &cmd;
1559 	req.rsp = &rsp;
1560 	ctrlr.aer_req[0] = &req;
1561 	ctrlr.nr_aer_reqs = 1;
1562 	req.qpair = &qpair;
1563 	TAILQ_INIT(&qpair.outstanding);
1564 	qpair.ctrlr = NULL;
1565 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1566 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1567 
1568 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1569 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1570 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1571 					  SPDK_NVME_RESERVATION_RELEASED);
1572 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1573 					  SPDK_NVME_RESERVATION_PREEMPTED);
1574 	poll_threads();
1575 	event.raw = rsp.nvme_cpl.cdw0;
1576 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1577 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1578 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1579 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1580 
1581 	/* Test Case: Get Log Page to clear the log pages */
1582 	iov.iov_base = &logs[0];
1583 	iov.iov_len = sizeof(logs);
1584 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1585 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1586 
1587 	cleanup_pending_async_events(&ctrlr);
1588 }
1589 
1590 static void
1591 test_get_dif_ctx(void)
1592 {
1593 	struct spdk_nvmf_subsystem subsystem = {};
1594 	struct spdk_nvmf_request req = {};
1595 	struct spdk_nvmf_qpair qpair = {};
1596 	struct spdk_nvmf_ctrlr ctrlr = {};
1597 	struct spdk_nvmf_ns ns = {};
1598 	struct spdk_nvmf_ns *_ns = NULL;
1599 	struct spdk_bdev bdev = {};
1600 	union nvmf_h2c_msg cmd = {};
1601 	struct spdk_dif_ctx dif_ctx = {};
1602 	bool ret;
1603 
1604 	ctrlr.subsys = &subsystem;
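	/* Walk through the prerequisites for building a DIF context one at a time: DIF
	 * enabled on the controller, an active I/O qpair, a non-fabrics/non-admin opcode,
	 * and a valid namespace. Only once all of them hold should the call succeed.
	 */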
1605 
1606 	qpair.ctrlr = &ctrlr;
1607 
1608 	req.qpair = &qpair;
1609 	req.cmd = &cmd;
1610 
1611 	ns.bdev = &bdev;
1612 
1613 	ctrlr.dif_insert_or_strip = false;
1614 
1615 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1616 	CU_ASSERT(ret == false);
1617 
1618 	ctrlr.dif_insert_or_strip = true;
1619 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1620 
1621 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1622 	CU_ASSERT(ret == false);
1623 
1624 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1625 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1626 
1627 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1628 	CU_ASSERT(ret == false);
1629 
1630 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1631 
1632 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1633 	CU_ASSERT(ret == false);
1634 
1635 	qpair.qid = 1;
1636 
1637 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1638 	CU_ASSERT(ret == false);
1639 
1640 	cmd.nvme_cmd.nsid = 1;
1641 
1642 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1643 	CU_ASSERT(ret == false);
1644 
1645 	subsystem.max_nsid = 1;
1646 	subsystem.ns = &_ns;
1647 	subsystem.ns[0] = &ns;
1648 
1649 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1650 	CU_ASSERT(ret == false);
1651 
1652 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1653 
1654 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1655 	CU_ASSERT(ret == true);
1656 }
1657 
1658 static void
1659 test_identify_ctrlr(void)
1660 {
1661 	struct spdk_nvmf_tgt tgt = {};
1662 	struct spdk_nvmf_subsystem subsystem = {
1663 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1664 		.tgt = &tgt,
1665 	};
1666 	struct spdk_nvmf_transport_ops tops = {};
1667 	struct spdk_nvmf_transport transport = {
1668 		.ops = &tops,
1669 		.opts = {
1670 			.in_capsule_data_size = 4096,
1671 		},
1672 	};
1673 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1674 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1675 	struct spdk_nvme_ctrlr_data cdata = {};
1676 	uint32_t expected_ioccsz;
1677 
1678 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1679 
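	/* IOCCSZ is reported in 16-byte units and covers the 64-byte SQE plus any in-capsule
	 * data, hence the expected value of (sizeof(cmd) + in_capsule_data_size) / 16 below.
	 */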
1680 	/* Check ioccsz, TCP transport */
1681 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1682 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1683 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1684 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1685 
1686 	/* Check ioccsz, RDMA transport */
1687 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1688 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1689 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1690 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1691 
1692 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1693 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1694 	ctrlr.dif_insert_or_strip = true;
1695 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1696 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1697 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1698 }
1699 
1700 static void
1701 test_identify_ctrlr_iocs_specific(void)
1702 {
1703 	struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 };
1704 	struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 };
1705 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop };
1706 	struct spdk_nvme_cmd cmd = {};
1707 	struct spdk_nvme_cpl rsp = {};
1708 	struct spdk_nvme_zns_ctrlr_data ctrlr_data = {};
1709 
1710 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;
1711 
1712 	/* ZNS max_zone_append_size_kib no limit */
1713 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1714 	memset(&rsp, 0, sizeof(rsp));
1715 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1716 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1717 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1718 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1719 	CU_ASSERT(ctrlr_data.zasl == 0);
1720 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1721 
1722 	/* ZNS max_zone_append_size_kib = 4096 */
1723 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1724 	memset(&rsp, 0, sizeof(rsp));
1725 	subsystem.max_zone_append_size_kib = 4096;
1726 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1727 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1728 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1729 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1730 	CU_ASSERT(ctrlr_data.zasl == 0);
1731 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1732 
1733 	/* ZNS max_zone_append_size_kib = 60000 */
1734 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1735 	memset(&rsp, 0, sizeof(rsp));
1736 	subsystem.max_zone_append_size_kib = 60000;
1737 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1738 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1739 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1740 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1741 	CU_ASSERT(ctrlr_data.zasl == 3);
1742 	ctrlr_data.zasl = 0;
1743 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1744 
1745 	/* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */
1746 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1747 	memset(&rsp, 0, sizeof(rsp));
1748 	ctrlr.vcprop.cap.bits.mpsmin = 2;
1749 	subsystem.max_zone_append_size_kib = 60000;
1750 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1751 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1752 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1753 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1754 	CU_ASSERT(ctrlr_data.zasl == 1);
1755 	ctrlr_data.zasl = 0;
1756 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1757 	ctrlr.vcprop.cap.bits.mpsmin = 0;
1758 
1759 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;
1760 
1761 	/* NVM */
1762 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1763 	memset(&rsp, 0, sizeof(rsp));
1764 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1765 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1766 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1767 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1768 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1769 }
1770 
1771 static int
1772 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1773 {
1774 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1775 
1776 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1777 }
1778 
1779 static void
1780 test_custom_admin_cmd(void)
1781 {
1782 	struct spdk_nvmf_subsystem subsystem;
1783 	struct spdk_nvmf_qpair qpair;
1784 	struct spdk_nvmf_ctrlr ctrlr;
1785 	struct spdk_nvmf_request req;
1786 	struct spdk_nvmf_ns *ns_ptrs[1];
1787 	struct spdk_nvmf_ns ns;
1788 	union nvmf_h2c_msg cmd;
1789 	union nvmf_c2h_msg rsp;
1790 	struct spdk_bdev bdev;
1791 	uint8_t buf[4096];
1792 	int rc;
1793 
1794 	memset(&subsystem, 0, sizeof(subsystem));
1795 	ns_ptrs[0] = &ns;
1796 	subsystem.ns = ns_ptrs;
1797 	subsystem.max_nsid = 1;
1798 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1799 
1800 	memset(&ns, 0, sizeof(ns));
1801 	ns.opts.nsid = 1;
1802 	ns.bdev = &bdev;
1803 
1804 	memset(&qpair, 0, sizeof(qpair));
1805 	qpair.ctrlr = &ctrlr;
1806 
1807 	memset(&ctrlr, 0, sizeof(ctrlr));
1808 	ctrlr.subsys = &subsystem;
1809 	ctrlr.vcprop.cc.bits.en = 1;
1810 	ctrlr.thread = spdk_get_thread();
1811 
1812 	memset(&req, 0, sizeof(req));
1813 	req.qpair = &qpair;
1814 	req.cmd = &cmd;
1815 	req.rsp = &rsp;
1816 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1817 	req.length = sizeof(buf);
1818 	spdk_iov_one(req.iov, &req.iovcnt, &buf, req.length);
1819 
1820 	memset(&cmd, 0, sizeof(cmd));
1821 	cmd.nvme_cmd.opc = 0xc1;
1822 	cmd.nvme_cmd.nsid = 0;
1823 	memset(&rsp, 0, sizeof(rsp));
1824 
1825 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1826 
1827 	/* Ensure that our hdlr is being called */
1828 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1829 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1830 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1831 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1832 }
1833 
1834 static void
1835 test_fused_compare_and_write(void)
1836 {
1837 	struct spdk_nvmf_request req = {};
1838 	struct spdk_nvmf_qpair qpair = {};
1839 	struct spdk_nvme_cmd cmd = {};
1840 	union nvmf_c2h_msg rsp = {};
1841 	struct spdk_nvmf_ctrlr ctrlr = {};
1842 	struct spdk_nvmf_subsystem subsystem = {};
1843 	struct spdk_nvmf_ns ns = {};
1844 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1845 	enum spdk_nvme_ana_state ana_state[1];
1846 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1847 	struct spdk_bdev bdev = {};
1848 
1849 	struct spdk_nvmf_poll_group group = {};
1850 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1851 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1852 	struct spdk_io_channel io_ch = {};
1853 
1854 	ns.bdev = &bdev;
1855 	ns.anagrpid = 1;
1856 
1857 	subsystem.id = 0;
1858 	subsystem.max_nsid = 1;
1859 	subsys_ns[0] = &ns;
1860 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1861 
1862 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1863 
1864 	/* Enable controller */
1865 	ctrlr.vcprop.cc.bits.en = 1;
1866 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1867 	ctrlr.listener = &listener;
1868 
1869 	group.num_sgroups = 1;
1870 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1871 	sgroups.num_ns = 1;
1872 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1873 	ns_info.channel = &io_ch;
1874 	sgroups.ns_info = &ns_info;
1875 	TAILQ_INIT(&sgroups.queued);
1876 	group.sgroups = &sgroups;
1877 	TAILQ_INIT(&qpair.outstanding);
1878 
1879 	qpair.ctrlr = &ctrlr;
1880 	qpair.group = &group;
1881 	qpair.qid = 1;
1882 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1883 
1884 	cmd.nsid = 1;
1885 
1886 	req.qpair = &qpair;
1887 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1888 	req.rsp = &rsp;
1889 
1890 	/* Valid fused sequence: COMPARE (FUSE_FIRST) followed by WRITE (FUSE_SECOND) */
1891 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1892 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1893 
1894 	spdk_nvmf_request_exec(&req);
1895 	CU_ASSERT(qpair.first_fused_req != NULL);
1896 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1897 
1898 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1899 	cmd.opc = SPDK_NVME_OPC_WRITE;
1900 
1901 	spdk_nvmf_request_exec(&req);
1902 	CU_ASSERT(qpair.first_fused_req == NULL);
1903 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1904 
1905 	/* Wrong sequence */
1906 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1907 	cmd.opc = SPDK_NVME_OPC_WRITE;
1908 
1909 	spdk_nvmf_request_exec(&req);
1910 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1911 	CU_ASSERT(qpair.first_fused_req == NULL);
1912 
1913 	/* Write as FUSE_FIRST (Wrong op code) */
1914 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1915 	cmd.opc = SPDK_NVME_OPC_WRITE;
1916 
1917 	spdk_nvmf_request_exec(&req);
1918 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1919 	CU_ASSERT(qpair.first_fused_req == NULL);
1920 
1921 	/* Compare as FUSE_SECOND (Wrong op code) */
1922 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1923 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1924 
1925 	spdk_nvmf_request_exec(&req);
1926 	CU_ASSERT(qpair.first_fused_req != NULL);
1927 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1928 
1929 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1930 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1931 
1932 	spdk_nvmf_request_exec(&req);
1933 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1934 	CU_ASSERT(qpair.first_fused_req == NULL);
1935 }
1936 
1937 static void
1938 test_multi_async_event_reqs(void)
1939 {
1940 	struct spdk_nvmf_subsystem subsystem = {};
1941 	struct spdk_nvmf_qpair qpair = {};
1942 	struct spdk_nvmf_ctrlr ctrlr = {};
1943 	struct spdk_nvmf_request req[5] = {};
1944 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1945 	struct spdk_nvmf_ns ns = {};
1946 	union nvmf_h2c_msg cmd[5] = {};
1947 	union nvmf_c2h_msg rsp[5] = {};
1948 
1949 	struct spdk_nvmf_poll_group group = {};
1950 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1951 
1952 	int i;
1953 
1954 	ns_ptrs[0] = &ns;
1955 	subsystem.ns = ns_ptrs;
1956 	subsystem.max_nsid = 1;
1957 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1958 
1959 	ns.opts.nsid = 1;
1960 	group.sgroups = &sgroups;
1961 
1962 	qpair.ctrlr = &ctrlr;
1963 	qpair.group = &group;
1964 	TAILQ_INIT(&qpair.outstanding);
1965 
1966 	ctrlr.subsys = &subsystem;
1967 	ctrlr.vcprop.cc.bits.en = 1;
1968 	ctrlr.thread = spdk_get_thread();
1969 
1970 	for (i = 0; i < 5; i++) {
1971 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1972 		cmd[i].nvme_cmd.nsid = 1;
1973 		cmd[i].nvme_cmd.cid = i;
1974 
1975 		req[i].qpair = &qpair;
1976 		req[i].cmd = &cmd[i];
1977 		req[i].rsp = &rsp[i];
1978 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1979 	}
1980 
1981 	/* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */
1982 	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
1983 	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
1984 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1985 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
1986 	}
1987 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1988 
1989 	/* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */
1990 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1991 	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
1992 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1993 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
1994 
1995 	/* Test that the aer_reqs array stays contiguous when a request in the middle is aborted */
1996 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
1997 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1998 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1999 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
2000 
2001 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
2002 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2003 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2004 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
2005 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
2006 
2007 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
2008 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
2009 }
2010 
2011 static void
2012 test_get_ana_log_page_one_ns_per_anagrp(void)
2013 {
2014 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
2015 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
2016 	uint32_t ana_group[3];
2017 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
2018 	struct spdk_nvmf_ctrlr ctrlr = {};
2019 	enum spdk_nvme_ana_state ana_state[3];
2020 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2021 	struct spdk_nvmf_ns ns[3];
2022 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
2023 	uint64_t offset;
2024 	uint32_t length;
2025 	int i;
2026 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2027 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2028 	struct iovec iov, iovs[2];
2029 	struct spdk_nvme_ana_page *ana_hdr;
2030 	char _ana_desc[UT_ANA_DESC_SIZE];
2031 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2032 
2033 	subsystem.ns = ns_arr;
2034 	subsystem.max_nsid = 3;
2035 	for (i = 0; i < 3; i++) {
2036 		subsystem.ana_group[i] = 1;
2037 	}
2038 	ctrlr.subsys = &subsystem;
2039 	ctrlr.listener = &listener;
2040 
2041 	for (i = 0; i < 3; i++) {
2042 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2043 	}
2044 
2045 	for (i = 0; i < 3; i++) {
2046 		ns_arr[i]->nsid = i + 1;
2047 		ns_arr[i]->anagrpid = i + 1;
2048 	}
2049 
2050 	/* create expected page */
2051 	ana_hdr = (void *)&expected_page[0];
2052 	ana_hdr->num_ana_group_desc = 3;
2053 	ana_hdr->change_count = 0;
2054 
2055 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2056 	ana_desc = (void *)_ana_desc;
2057 	offset = sizeof(struct spdk_nvme_ana_page);
2058 
2059 	for (i = 0; i < 3; i++) {
2060 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
2061 		ana_desc->ana_group_id = ns_arr[i]->nsid;
2062 		ana_desc->num_of_nsid = 1;
2063 		ana_desc->change_count = 0;
2064 		ana_desc->ana_state = ctrlr.listener->ana_state[i];
2065 		ana_desc->nsid[0] = ns_arr[i]->nsid;
2066 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
2067 		offset += UT_ANA_DESC_SIZE;
2068 	}
2069 
2070 	/* read entire actual log page */
2071 	offset = 0;
2072 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2073 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2074 		iov.iov_base = &actual_page[offset];
2075 		iov.iov_len = length;
2076 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2077 		offset += length;
2078 	}
2079 
2080 	/* compare expected page and actual page */
2081 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2082 
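	/* Read the page again, this time split across two iovecs with the boundary landing a
	 * few bytes into the last descriptor, to exercise the scattered-copy path.
	 */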
2083 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2084 	offset = 0;
2085 	iovs[0].iov_base = &actual_page[offset];
2086 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2087 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
2088 	iovs[1].iov_base = &actual_page[offset];
2089 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
2090 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2091 
2092 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2093 
2094 #undef UT_ANA_DESC_SIZE
2095 #undef UT_ANA_LOG_PAGE_SIZE
2096 }
2097 
2098 static void
2099 test_get_ana_log_page_multi_ns_per_anagrp(void)
2100 {
2101 #define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
2102 				 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
2103 				 sizeof(uint32_t) * 5)
2104 	struct spdk_nvmf_ns ns[5];
2105 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
2106 	uint32_t ana_group[5] = {0};
2107 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
2108 	enum spdk_nvme_ana_state ana_state[5];
2109 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
2110 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
2111 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2112 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
2113 	struct iovec iov, iovs[2];
2114 	struct spdk_nvme_ana_page *ana_hdr;
2115 	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
2116 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2117 	uint64_t offset;
2118 	uint32_t length;
2119 	int i;
2120 
2121 	subsystem.max_nsid = 5;
2122 	subsystem.ana_group[1] = 3;
2123 	subsystem.ana_group[2] = 2;
2124 	for (i = 0; i < 5; i++) {
2125 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2126 	}
2127 
2128 	for (i = 0; i < 5; i++) {
2129 		ns_arr[i]->nsid = i + 1;
2130 	}
2131 	ns_arr[0]->anagrpid = 2;
2132 	ns_arr[1]->anagrpid = 3;
2133 	ns_arr[2]->anagrpid = 2;
2134 	ns_arr[3]->anagrpid = 3;
2135 	ns_arr[4]->anagrpid = 2;
2136 
2137 	/* create expected page */
2138 	ana_hdr = (void *)&expected_page[0];
2139 	ana_hdr->num_ana_group_desc = 2;
2140 	ana_hdr->change_count = 0;
2141 
2142 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2143 	ana_desc = (void *)_ana_desc;
2144 	offset = sizeof(struct spdk_nvme_ana_page);
2145 
2146 	memset(_ana_desc, 0, sizeof(_ana_desc));
2147 	ana_desc->ana_group_id = 2;
2148 	ana_desc->num_of_nsid = 3;
2149 	ana_desc->change_count = 0;
2150 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2151 	ana_desc->nsid[0] = 1;
2152 	ana_desc->nsid[1] = 3;
2153 	ana_desc->nsid[2] = 5;
2154 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2155 	       sizeof(uint32_t) * 3);
2156 	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;
2157 
2158 	memset(_ana_desc, 0, sizeof(_ana_desc));
2159 	ana_desc->ana_group_id = 3;
2160 	ana_desc->num_of_nsid = 2;
2161 	ana_desc->change_count = 0;
2162 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2163 	ana_desc->nsid[0] = 2;
2164 	ana_desc->nsid[1] = 4;
2165 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2166 	       sizeof(uint32_t) * 2);
2167 
2168 	/* read entire actual log page, and compare expected page and actual page. */
2169 	offset = 0;
2170 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
2171 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
2172 		iov.iov_base = &actual_page[offset];
2173 		iov.iov_len = length;
2174 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
2175 		offset += length;
2176 	}
2177 
2178 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2179 
2180 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
2181 	offset = 0;
2182 	iovs[0].iov_base = &actual_page[offset];
2183 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2184 	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
2185 	iovs[1].iov_base = &actual_page[offset];
2186 	iovs[1].iov_len = sizeof(uint32_t) * 5;
2187 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
2188 
2189 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
2190 
2191 #undef UT_ANA_LOG_PAGE_SIZE
2192 }

2193 static void
2194 test_multi_async_events(void)
2195 {
2196 	struct spdk_nvmf_subsystem subsystem = {};
2197 	struct spdk_nvmf_qpair qpair = {};
2198 	struct spdk_nvmf_ctrlr ctrlr = {};
2199 	struct spdk_nvmf_request req[4] = {};
2200 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2201 	struct spdk_nvmf_ns ns = {};
2202 	union nvmf_h2c_msg cmd[4] = {};
2203 	union nvmf_c2h_msg rsp[4] = {};
2204 	union spdk_nvme_async_event_completion event = {};
2205 	struct spdk_nvmf_poll_group group = {};
2206 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2207 	int i;
2208 
2209 	ns_ptrs[0] = &ns;
2210 	subsystem.ns = ns_ptrs;
2211 	subsystem.max_nsid = 1;
2212 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2213 
2214 	ns.opts.nsid = 1;
2215 	group.sgroups = &sgroups;
2216 
2217 	qpair.ctrlr = &ctrlr;
2218 	qpair.group = &group;
2219 	TAILQ_INIT(&qpair.outstanding);
2220 
2221 	ctrlr.subsys = &subsystem;
2222 	ctrlr.vcprop.cc.bits.en = 1;
2223 	ctrlr.thread = spdk_get_thread();
2224 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2225 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2226 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2227 	init_pending_async_events(&ctrlr);
2228 
2229 	/* The target queues pending events when there is no outstanding AER request */
2230 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2231 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2232 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2233 
2234 	for (i = 0; i < 4; i++) {
2235 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2236 		cmd[i].nvme_cmd.nsid = 1;
2237 		cmd[i].nvme_cmd.cid = i;
2238 
2239 		req[i].qpair = &qpair;
2240 		req[i].cmd = &cmd[i];
2241 		req[i].rsp = &rsp[i];
2242 
2243 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2244 
2245 		sgroups.mgmt_io_outstanding = 1;
2246 		if (i < 3) {
2247 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2248 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2249 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2250 		} else {
2251 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2252 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2253 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2254 		}
2255 	}
2256 
2257 	event.raw = rsp[0].nvme_cpl.cdw0;
2258 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2259 	event.raw = rsp[1].nvme_cpl.cdw0;
2260 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2261 	event.raw = rsp[2].nvme_cpl.cdw0;
2262 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2263 
2264 	cleanup_pending_async_events(&ctrlr);
2265 }
2266 
2267 static void
2268 test_rae(void)
2269 {
2270 	struct spdk_nvmf_subsystem subsystem = {};
2271 	struct spdk_nvmf_qpair qpair = {};
2272 	struct spdk_nvmf_ctrlr ctrlr = {};
2273 	struct spdk_nvmf_request req[3] = {};
2274 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2275 	struct spdk_nvmf_ns ns = {};
2276 	union nvmf_h2c_msg cmd[3] = {};
2277 	union nvmf_c2h_msg rsp[3] = {};
2278 	union spdk_nvme_async_event_completion event = {};
2279 	struct spdk_nvmf_poll_group group = {};
2280 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2281 	int i;
2282 	char data[4096];
2283 
2284 	ns_ptrs[0] = &ns;
2285 	subsystem.ns = ns_ptrs;
2286 	subsystem.max_nsid = 1;
2287 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2288 
2289 	ns.opts.nsid = 1;
2290 	group.sgroups = &sgroups;
2291 
2292 	qpair.ctrlr = &ctrlr;
2293 	qpair.group = &group;
2294 	TAILQ_INIT(&qpair.outstanding);
2295 
2296 	ctrlr.subsys = &subsystem;
2297 	ctrlr.vcprop.cc.bits.en = 1;
2298 	ctrlr.thread = spdk_get_thread();
2299 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2300 	init_pending_async_events(&ctrlr);
2301 
2302 	/* The target queues pending events when there is no outstanding AER request */
2303 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2304 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2305 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2306 	/* only one event is queued until RAE is cleared */
2307 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2308 
2309 	req[0].qpair = &qpair;
2310 	req[0].cmd = &cmd[0];
2311 	req[0].rsp = &rsp[0];
2312 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2313 	cmd[0].nvme_cmd.nsid = 1;
2314 	cmd[0].nvme_cmd.cid = 0;
2315 
2316 	for (i = 1; i < 3; i++) {
2317 		req[i].qpair = &qpair;
2318 		req[i].cmd = &cmd[i];
2319 		req[i].rsp = &rsp[i];
2320 		req[i].length = sizeof(data);
2321 		spdk_iov_one(req[i].iov, &req[i].iovcnt, &data, req[i].length);
2322 
2323 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2324 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2325 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2326 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2327 			spdk_nvme_bytes_to_numd(req[i].length);
2328 		cmd[i].nvme_cmd.cid = i;
2329 	}
2330 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2331 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
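	/* RAE (Retain Asynchronous Event) = 1 leaves the event condition armed after the log
	 * page read; only the RAE = 0 read should clear it and allow a new event to be queued.
	 */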
2332 
2333 	/* consume the pending event */
2334 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2335 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2336 	event.raw = rsp[0].nvme_cpl.cdw0;
2337 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2338 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2339 
2340 	/* get log with RAE set */
2341 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2342 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2343 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2344 
2345 	/* no new event will be generated until RAE is cleared */
2346 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2347 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2348 
2349 	/* get log with RAE clear */
2350 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2351 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2352 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2353 
2354 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2355 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2356 
2357 	cleanup_pending_async_events(&ctrlr);
2358 }
2359 
2360 static void
2361 test_nvmf_ctrlr_create_destruct(void)
2362 {
2363 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2364 	struct spdk_nvmf_poll_group group = {};
2365 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2366 	struct spdk_nvmf_transport transport = {};
2367 	struct spdk_nvmf_transport_ops tops = {};
2368 	struct spdk_nvmf_subsystem subsystem = {};
2369 	struct spdk_nvmf_request req = {};
2370 	struct spdk_nvmf_qpair qpair = {};
2371 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2372 	struct spdk_nvmf_tgt tgt = {};
2373 	union nvmf_h2c_msg cmd = {};
2374 	union nvmf_c2h_msg rsp = {};
2375 	const uint8_t hostid[16] = {
2376 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2377 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2378 	};
2379 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2380 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2381 
2382 	group.thread = spdk_get_thread();
2383 	transport.ops = &tops;
2384 	transport.opts.max_aq_depth = 32;
2385 	transport.opts.max_queue_depth = 64;
2386 	transport.opts.max_qpairs_per_ctrlr = 3;
2387 	transport.opts.dif_insert_or_strip = true;
2388 	transport.tgt = &tgt;
2389 	qpair.transport = &transport;
2390 	qpair.group = &group;
2391 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2392 	TAILQ_INIT(&qpair.outstanding);
2393 
2394 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2395 	connect_data.cntlid = 0xFFFF;
2396 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2397 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2398 
2399 	subsystem.thread = spdk_get_thread();
2400 	subsystem.id = 1;
2401 	TAILQ_INIT(&subsystem.ctrlrs);
2402 	subsystem.tgt = &tgt;
2403 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2404 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2405 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2406 
2407 	group.sgroups = sgroups;
2408 
2409 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2410 	cmd.connect_cmd.cid = 1;
2411 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2412 	cmd.connect_cmd.recfmt = 0;
2413 	cmd.connect_cmd.qid = 0;
2414 	cmd.connect_cmd.sqsize = 31;
2415 	cmd.connect_cmd.cattr = 0;
2416 	cmd.connect_cmd.kato = 120000;
2417 
2418 	req.qpair = &qpair;
2419 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2420 	req.length = sizeof(connect_data);
2421 	spdk_iov_one(req.iov, &req.iovcnt, &connect_data, req.length);
2422 	req.cmd = &cmd;
2423 	req.rsp = &rsp;
2424 
2425 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2426 	sgroups[subsystem.id].mgmt_io_outstanding++;
2427 
2428 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base);
2429 	poll_threads();
2430 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
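	/* The defaults checked below presumably follow from the setup above: kato from the
	 * connect command, mqes as the 0's-based max_queue_depth (64 - 1), one I/O SQ/CQ pair
	 * from max_qpairs_per_ctrlr = 3, and dif_insert_or_strip from the transport opts.
	 */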
2431 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2432 	CU_ASSERT(ctrlr->subsys == &subsystem);
2433 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2434 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2435 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2436 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2437 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2438 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2439 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2440 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
2441 	CU_ASSERT(!strncmp((void *)&ctrlr->hostid, hostid, 16));
2442 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2443 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2444 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2445 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
2446 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2447 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2448 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2449 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2450 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2451 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2452 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2453 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2454 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2455 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2456 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2457 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2458 
2459 	ctrlr->in_destruct = true;
2460 	nvmf_ctrlr_destruct(ctrlr);
2461 	poll_threads();
2462 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2463 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2464 }
2465 
2466 static void
2467 test_nvmf_ctrlr_use_zcopy(void)
2468 {
2469 	struct spdk_nvmf_subsystem subsystem = {};
2470 	struct spdk_nvmf_transport transport = {};
2471 	struct spdk_nvmf_request req = {};
2472 	struct spdk_nvmf_qpair qpair = {};
2473 	struct spdk_nvmf_ctrlr ctrlr = {};
2474 	union nvmf_h2c_msg cmd = {};
2475 	struct spdk_nvmf_ns ns = {};
2476 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2477 	struct spdk_bdev bdev = {};
2478 	struct spdk_nvmf_poll_group group = {};
2479 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2480 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2481 	struct spdk_io_channel io_ch = {};
2482 	int opc;
2483 
2484 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2485 	ns.bdev = &bdev;
2486 
2487 	subsystem.id = 0;
2488 	subsystem.max_nsid = 1;
2489 	subsys_ns[0] = &ns;
2490 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2491 
2492 	ctrlr.subsys = &subsystem;
2493 
2494 	transport.opts.zcopy = true;
2495 
2496 	qpair.ctrlr = &ctrlr;
2497 	qpair.group = &group;
2498 	qpair.qid = 1;
2499 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2500 	qpair.transport = &transport;
2501 
2502 	group.thread = spdk_get_thread();
2503 	group.num_sgroups = 1;
2504 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2505 	sgroups.num_ns = 1;
2506 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2507 	ns_info.channel = &io_ch;
2508 	sgroups.ns_info = &ns_info;
2509 	TAILQ_INIT(&sgroups.queued);
2510 	group.sgroups = &sgroups;
2511 	TAILQ_INIT(&qpair.outstanding);
2512 
2513 	req.qpair = &qpair;
2514 	req.cmd = &cmd;
2515 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2516 
2517 	/* Admin queue */
2518 	qpair.qid = 0;
2519 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2520 	qpair.qid = 1;
2521 
2522 	/* Invalid Opcodes */
2523 	for (opc = 0; opc <= 255; opc++) {
2524 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2525 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2526 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2527 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2528 		}
2529 	}
2530 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2531 
2532 	/* Fused WRITE */
2533 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2534 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2535 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2536 
2537 	/* Non bdev */
2538 	/* Invalid NSID, so there is no namespace/bdev to use for zcopy */
2539 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2540 	cmd.nvme_cmd.nsid = 1;
2541 
2542 	/* ZCOPY Not supported */
2543 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2544 	ns.zcopy = true;
2545 
2546 	/* ZCOPY disabled on transport level */
2547 	transport.opts.zcopy = false;
2548 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2549 	transport.opts.zcopy = true;
2550 
2551 	/* Success */
2552 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2553 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2554 }
2555 
2556 static void
2557 qpair_state_change_done(void *cb_arg, int status)
2558 {
2559 }
2560 
2561 static void
2562 test_spdk_nvmf_request_zcopy_start(void)
2563 {
2564 	struct spdk_nvmf_request req = {};
2565 	struct spdk_nvmf_qpair qpair = {};
2566 	struct spdk_nvmf_transport transport = {};
2567 	struct spdk_nvme_cmd cmd = {};
2568 	union nvmf_c2h_msg rsp = {};
2569 	struct spdk_nvmf_ctrlr ctrlr = {};
2570 	struct spdk_nvmf_subsystem subsystem = {};
2571 	struct spdk_nvmf_ns ns = {};
2572 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2573 	enum spdk_nvme_ana_state ana_state[1];
2574 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2575 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2576 
2577 	struct spdk_nvmf_poll_group group = {};
2578 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2579 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2580 	struct spdk_io_channel io_ch = {};
2581 
2582 	ns.bdev = &bdev;
2583 	ns.zcopy = true;
2584 	ns.anagrpid = 1;
2585 
2586 	subsystem.id = 0;
2587 	subsystem.max_nsid = 1;
2588 	subsys_ns[0] = &ns;
2589 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2590 
2591 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2592 
2593 	/* Enable controller */
2594 	ctrlr.vcprop.cc.bits.en = 1;
2595 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2596 	ctrlr.listener = &listener;
2597 
2598 	transport.opts.zcopy = true;
2599 
2600 	group.thread = spdk_get_thread();
2601 	group.num_sgroups = 1;
2602 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2603 	sgroups.num_ns = 1;
2604 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2605 	ns_info.channel = &io_ch;
2606 	sgroups.ns_info = &ns_info;
2607 	TAILQ_INIT(&sgroups.queued);
2608 	group.sgroups = &sgroups;
2609 	TAILQ_INIT(&qpair.outstanding);
2610 
2611 	qpair.ctrlr = &ctrlr;
2612 	qpair.group = &group;
2613 	qpair.transport = &transport;
2614 	qpair.qid = 1;
2615 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2616 
2617 	cmd.nsid = 1;
2618 
2619 	req.qpair = &qpair;
2620 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2621 	req.rsp = &rsp;
2622 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2623 	cmd.opc = SPDK_NVME_OPC_READ;
2624 
2625 	/* Fail because no controller */
2626 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2627 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2628 	qpair.ctrlr = NULL;
2629 	spdk_nvmf_request_zcopy_start(&req);
2630 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2631 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2632 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
2633 	qpair.ctrlr = &ctrlr;
2634 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2635 
2636 	/* Fail because bad NSID */
2637 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2638 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2639 	cmd.nsid = 0;
2640 	spdk_nvmf_request_zcopy_start(&req);
2641 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2642 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2643 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2644 	cmd.nsid = 1;
2645 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2646 
2647 	/* Fail because bad Channel */
2648 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2649 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2650 	ns_info.channel = NULL;
2651 	spdk_nvmf_request_zcopy_start(&req);
2652 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2653 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2654 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2655 	ns_info.channel = &io_ch;
2656 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2657 
2658 	/* Queue the request because the NSID is not active */
2659 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2660 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2661 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2662 	spdk_nvmf_request_zcopy_start(&req);
2663 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
2664 	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
2665 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2666 	TAILQ_REMOVE(&sgroups.queued, &req, link);
2667 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2668 
2669 	/* Fail because QPair is not active */
2670 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2671 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2672 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2673 	qpair.state_cb = qpair_state_change_done;
2674 	spdk_nvmf_request_zcopy_start(&req);
2675 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
2676 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2677 	qpair.state_cb = NULL;
2678 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2679 
2680 	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails */
2681 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2682 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2683 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2684 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2685 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
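	/* Starting at LBA == blockcnt with 101 blocks reads past the end of the 100-block
	 * bdev, so the zcopy start is expected to fail.
	 */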
2686 	spdk_nvmf_request_zcopy_start(&req);
2687 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2688 	cmd.cdw10 = 0;
2689 	cmd.cdw12 = 0;
2690 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2691 
2692 	/* Success */
2693 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2694 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2695 	spdk_nvmf_request_zcopy_start(&req);
2696 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2697 }
2698 
2699 static void
2700 test_zcopy_read(void)
2701 {
2702 	struct spdk_nvmf_request req = {};
2703 	struct spdk_nvmf_qpair qpair = {};
2704 	struct spdk_nvmf_transport transport = {};
2705 	struct spdk_nvme_cmd cmd = {};
2706 	union nvmf_c2h_msg rsp = {};
2707 	struct spdk_nvmf_ctrlr ctrlr = {};
2708 	struct spdk_nvmf_subsystem subsystem = {};
2709 	struct spdk_nvmf_ns ns = {};
2710 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2711 	enum spdk_nvme_ana_state ana_state[1];
2712 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2713 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2714 
2715 	struct spdk_nvmf_poll_group group = {};
2716 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2717 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2718 	struct spdk_io_channel io_ch = {};
2719 
2720 	ns.bdev = &bdev;
2721 	ns.zcopy = true;
2722 	ns.anagrpid = 1;
2723 
2724 	subsystem.id = 0;
2725 	subsystem.max_nsid = 1;
2726 	subsys_ns[0] = &ns;
2727 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2728 
2729 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2730 
2731 	/* Enable controller */
2732 	ctrlr.vcprop.cc.bits.en = 1;
2733 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2734 	ctrlr.listener = &listener;
2735 
2736 	transport.opts.zcopy = true;
2737 
2738 	group.thread = spdk_get_thread();
2739 	group.num_sgroups = 1;
2740 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2741 	sgroups.num_ns = 1;
2742 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2743 	ns_info.channel = &io_ch;
2744 	sgroups.ns_info = &ns_info;
2745 	TAILQ_INIT(&sgroups.queued);
2746 	group.sgroups = &sgroups;
2747 	TAILQ_INIT(&qpair.outstanding);
2748 
2749 	qpair.ctrlr = &ctrlr;
2750 	qpair.group = &group;
2751 	qpair.transport = &transport;
2752 	qpair.qid = 1;
2753 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2754 
2755 	cmd.nsid = 1;
2756 
2757 	req.qpair = &qpair;
2758 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2759 	req.rsp = &rsp;
2760 	cmd.opc = SPDK_NVME_OPC_READ;
2761 
2762 	/* Prepare for zcopy */
2763 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2764 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2765 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2766 	CU_ASSERT(ns_info.io_outstanding == 0);
2767 
2768 	/* Perform the zcopy start */
2769 	spdk_nvmf_request_zcopy_start(&req);
2770 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2771 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2772 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2773 	CU_ASSERT(ns_info.io_outstanding == 1);
2774 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2775 
2776 	/* Perform the zcopy end */
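	/* A read has no data to write back to the bdev, so the final argument is false here,
	 * while the write test below ends its zcopy with true.
	 */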
2777 	spdk_nvmf_request_zcopy_end(&req, false);
2778 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2779 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2780 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2781 	CU_ASSERT(ns_info.io_outstanding == 0);
2782 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2783 }
2784 
2785 static void
2786 test_zcopy_write(void)
2787 {
2788 	struct spdk_nvmf_request req = {};
2789 	struct spdk_nvmf_qpair qpair = {};
2790 	struct spdk_nvmf_transport transport = {};
2791 	struct spdk_nvme_cmd cmd = {};
2792 	union nvmf_c2h_msg rsp = {};
2793 	struct spdk_nvmf_ctrlr ctrlr = {};
2794 	struct spdk_nvmf_subsystem subsystem = {};
2795 	struct spdk_nvmf_ns ns = {};
2796 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2797 	enum spdk_nvme_ana_state ana_state[1];
2798 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2799 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2800 
2801 	struct spdk_nvmf_poll_group group = {};
2802 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2803 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2804 	struct spdk_io_channel io_ch = {};
2805 
2806 	ns.bdev = &bdev;
2807 	ns.zcopy = true;
2808 	ns.anagrpid = 1;
2809 
2810 	subsystem.id = 0;
2811 	subsystem.max_nsid = 1;
2812 	subsys_ns[0] = &ns;
2813 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2814 
2815 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2816 
2817 	/* Enable controller */
2818 	ctrlr.vcprop.cc.bits.en = 1;
2819 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2820 	ctrlr.listener = &listener;
2821 
2822 	transport.opts.zcopy = true;
2823 
2824 	group.thread = spdk_get_thread();
2825 	group.num_sgroups = 1;
2826 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2827 	sgroups.num_ns = 1;
2828 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2829 	ns_info.channel = &io_ch;
2830 	sgroups.ns_info = &ns_info;
2831 	TAILQ_INIT(&sgroups.queued);
2832 	group.sgroups = &sgroups;
2833 	TAILQ_INIT(&qpair.outstanding);
2834 
2835 	qpair.ctrlr = &ctrlr;
2836 	qpair.group = &group;
2837 	qpair.transport = &transport;
2838 	qpair.qid = 1;
2839 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2840 
2841 	cmd.nsid = 1;
2842 
2843 	req.qpair = &qpair;
2844 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2845 	req.rsp = &rsp;
2846 	cmd.opc = SPDK_NVME_OPC_WRITE;
2847 
2848 	/* Prepare for zcopy */
2849 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2850 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2851 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2852 	CU_ASSERT(ns_info.io_outstanding == 0);
2853 
2854 	/* Perform the zcopy start */
2855 	spdk_nvmf_request_zcopy_start(&req);
2856 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2857 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2858 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2859 	CU_ASSERT(ns_info.io_outstanding == 1);
2860 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2861 
2862 	/* Perform the zcopy end */
2863 	spdk_nvmf_request_zcopy_end(&req, true);
2864 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2865 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2866 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2867 	CU_ASSERT(ns_info.io_outstanding == 0);
2868 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2869 }
2870 
2871 static void
2872 test_nvmf_property_set(void)
2873 {
2874 	int rc;
2875 	struct spdk_nvmf_request req = {};
2876 	struct spdk_nvmf_qpair qpair = {};
2877 	struct spdk_nvmf_ctrlr ctrlr = {};
2878 	union nvmf_h2c_msg cmd = {};
2879 	union nvmf_c2h_msg rsp = {};
2880 
2881 	req.qpair = &qpair;
2882 	qpair.ctrlr = &ctrlr;
2883 	req.cmd = &cmd;
2884 	req.rsp = &rsp;
2885 
2886 	/* Invalid parameters */
2887 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2888 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
2889 
2890 	rc = nvmf_property_set(&req);
2891 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2892 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2893 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2894 
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);

	/* Set cc with same property size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate cc data */
	ctrlr.vcprop.cc.raw = 0xDEADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);

	/* Set asq with different property size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate asq data */
	ctrlr.vcprop.asq = 0xAADDADBEEF;

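	/*
	 * The command still uses a 4-byte size attribute, so only the lower
	 * dword of the 64-bit asq value is returned.
	 */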
	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
}

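/*
 * Get Features (Host Behavior Support): requests without a correctly sized
 * data buffer are rejected, and a valid request copies the controller's
 * current ACRE setting into the host buffer.
 */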
static void
test_nvmf_ctrlr_get_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Invalid data */
	req.length = sizeof(struct spdk_nvme_host_behavior);
	req.iovcnt = 0;

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Wrong structure length */
	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;
	spdk_iov_one(req.iov, &req.iovcnt, &behavior, req.length);

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Features Host Behavior Support Success */
	req.length = sizeof(struct spdk_nvme_host_behavior);
	spdk_iov_one(req.iov, &req.iovcnt, &behavior, req.length);

	ctrlr.acre_enabled = true;
	behavior.acre = false;

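	/* The handler should copy ctrlr.acre_enabled back into the host buffer. */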
	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(behavior.acre == true);
}

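/*
 * Set Features (Host Behavior Support): missing or short buffers and
 * out-of-range ACRE values are rejected, while valid values toggle
 * ctrlr.acre_enabled.
 */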
static void
test_nvmf_ctrlr_set_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior host_behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.iov[0].iov_base = &host_behavior;
	req.iov[0].iov_len = sizeof(host_behavior);

	/* Invalid iovcnt */
	req.iovcnt = 0;
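	/*
	 * rc is pre-seeded with ASYNCHRONOUS before each call so the asserts
	 * below prove that the handler completed the request synchronously.
	 */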
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Invalid iov_len */
	req.iovcnt = 1;
	req.iov[0].iov_len = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* acre is false */
	host_behavior.acre = 0;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == false);

	/* acre is true */
	host_behavior.acre = 1;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == true);

	/* Invalid acre */
	host_behavior.acre = 2;
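	/* acre is a boolean; any value other than 0 or 1 must be rejected. */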
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);
	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);
	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
	CU_ADD_TEST(suite, test_multi_async_events);
	CU_ADD_TEST(suite, test_rae);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
	CU_ADD_TEST(suite, test_zcopy_read);
	CU_ADD_TEST(suite, test_zcopy_write);
	CU_ADD_TEST(suite, test_nvmf_property_set);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);

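	/* Run every test on a single ut thread provided by ut_multithread.c. */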
	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}