/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/bdev_zone.h"
#include "spdk/nvme_spec.h"
#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

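/*
 * The .c files below are #included directly (rather than linked) so that the
 * test binary can reach static functions and internal state in nvmf/ctrlr.c.
 */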
#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

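/*
 * Minimal stand-in for the real struct spdk_bdev: only the fields that the
 * code under test dereferences are defined.
 */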
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
	bool zoned;
	uint32_t zone_size;
	uint32_t max_open_zones;
	uint32_t max_active_zones;
	enum spdk_dif_type dif_type;
};

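/* Arbitrary zoned-namespace geometry reported by the spdk_bdev_get_* mocks below. */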
#define MAX_OPEN_ZONES 12
#define MAX_ACTIVE_ZONES 34
#define ZONE_SIZE 56

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

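/*
 * Sentinel bdev_io pointers handed out by the zcopy-start fake below.  They
 * are only compared against req->zcopy_bdev_io, never dereferenced.
 */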
static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

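/*
 * DEFINE_STUB()/DEFINE_STUB_V() come from spdk_internal/mock.h.  Each one
 * generates a mock implementation of the named function that returns a
 * settable value (roughly: a global default plus a body that returns
 * MOCK_GET(<name>)).  Tests override the return value with
 * MOCK_SET(<name>, value) and restore the default with MOCK_CLEAR(<name>).
 */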
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);
DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool,
	    (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev_desc *desc, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
	    MAX_ACTIVE_ZONES);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n),
	    false);
DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0);
DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r),
	    SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

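/*
 * The functions below are hand-written fakes rather than DEFINE_STUB mocks:
 * the tests depend on their side effects (or on simplified but real logic),
 * not just on a canned return value.
 */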
void
nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, enum spdk_nvmf_qpair_state state)
{
	qpair->state = state;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair)
{
	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

void
nvmf_bdev_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ns *ns,
				  struct spdk_nvme_nvm_ns_data *nsdata_nvm)
{
	if (ns->bdev->dif_type == SPDK_DIF_DISABLE) {
		return;
	}

	nsdata_nvm->lbstm = 0;
	nsdata_nvm->pic._16bpists = 0;
	nsdata_nvm->pic._16bpistm = 1;
	nsdata_nvm->pic.stcrs = 0;
	nsdata_nvm->elbaf[0].sts = 16;
	nsdata_nvm->elbaf[0].pif = SPDK_DIF_PI_FORMAT_32;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

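/*
 * Fake zcopy start that decodes the command the way a real read/write
 * handler would: CDW10/11 carry the 64-bit starting LBA and CDW12 bits 15:0
 * carry the 0's-based number of logical blocks, hence the "+ 1".  On success
 * it stores one of the sentinel bdev_io pointers for the tests to check.
 */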
int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

bool
nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
{
	return ns->ptpl_file != NULL;
}

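/*
 * Get Log Page: exercises the fully valid path plus the invalid-log-ID,
 * misaligned-offset, and missing-data-buffer error paths.
 */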
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.length = sizeof(data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &data, req.length);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.iovcnt = 0;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

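/*
 * Despite the name, this exercises nvmf_check_qpair_active(): a fabrics
 * PROPERTY_GET on a connecting qpair with no controller must fail with
 * COMMAND_SEQUENCE_ERROR.
 */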
static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	bool ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	TAILQ_INIT(&req_qpair.outstanding);
	req_qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_check_qpair_active(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT(ret == false);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

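/*
 * CONNECT handling: walks admin and I/O queue connects through the valid
 * paths and through each validation failure (data length, recfmt, unknown
 * subsystem, unterminated host NQN, disallowed host, sqsize, cntlid,
 * controller state, and duplicate or out-of-range queue IDs).
 */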
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_ns *ns_arr[1] = { NULL };
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_CONNECTING;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
	subsystem.ns = ns_arr;
	subsystem.max_nsid = 1;

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.length = sizeof(connect_data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;

	/* I/O connect to discovery controller with keep-alive-timeout == 0.
	 * In this case, the target applies a fixed default keep-alive timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, false);

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with qid that is too large */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect duplicate QID and schedule a retry.  So for
	 * now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now advance the clock, so that the retry poller executes. */
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with temporarily duplicate queue ID. This covers race
	 * where qpair_mask bit may not yet be cleared, even though initiator
	 * has closed the connection.  See issue #2955. */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect duplicate QID and schedule a retry.  So for
	 * now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now clear the stale qpair_mask bit and advance the clock, so that the
	 * retry poller executes and the connect succeeds. */
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_CONNECTING;

	/* I/O connect when admin qpair was destroyed */
	ctrlr.admin_qpair = NULL;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	group.current_unassociated_qpairs = 1;
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.admin_qpair = &admin_qpair;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

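/*
 * Identify Namespace Identification Descriptor List (CNS 03h): verifies the
 * type/length/value layout of the returned EUI64, NGUID, and UUID
 * descriptors for each combination of configured identifiers.
 */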
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();
	ctrlr.visible_ns = spdk_bit_array_create(1);

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.length = sizeof(buf);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns is inactive */
	spdk_bit_array_clear(ctrlr.visible_ns, 0);
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, but command not using NSID */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_KEEP_ALIVE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);

	spdk_bit_array_free(&ctrlr.visible_ns);
}

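/*
 * Identify Namespace (CNS 00h): active, inactive, unallocated, and invalid
 * NSIDs, including the broadcast NSID (0xFFFFFFFF), which is invalid here
 * because namespace management is not supported.
 */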
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem,
		.admin_qpair = &admin_qpair,
	};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	ctrlr.visible_ns = spdk_bit_array_create(3);
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	spdk_bit_array_set(ctrlr.visible_ns, 2);

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 1 */
	spdk_bit_array_clear(ctrlr.visible_ns, 0);
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid but unallocated NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	spdk_bit_array_free(&ctrlr.visible_ns);
}

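/*
 * I/O-command-set-specific Identify Namespace (CNS 05h): checks the ZNS
 * fields (MAR/MOR are 0's-based, hence the "- 1" expectations) and the NVM
 * protection-information fields reported when DIF is enabled.
 */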
static void
test_identify_ns_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ns_data nsdata_zns = {};
	struct spdk_nvme_nvm_ns_data nsdata_nvm = {};
	struct spdk_bdev bdev[2] = {
		{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE,
		 .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES},
		{.blockcnt = 5678}
	};
	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};

	ctrlr.visible_ns = spdk_bit_array_create(3);
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	spdk_bit_array_set(ctrlr.visible_ns, 1);
	spdk_bit_array_set(ctrlr.visible_ns, 2);
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* Invalid ZNS NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata_zns, 0xFF, sizeof(nsdata_zns));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata_zns, sizeof(nsdata_zns)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata_zns, sizeof(nsdata_zns)));

	/* Valid ZNS NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata_zns, 0xFF, sizeof(nsdata_zns));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata_zns, sizeof(nsdata_zns)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata_zns.ozcs.read_across_zone_boundaries == 1);
	CU_ASSERT(nsdata_zns.mar == MAX_ACTIVE_ZONES - 1);
	CU_ASSERT(nsdata_zns.mor == MAX_OPEN_ZONES - 1);
	CU_ASSERT(nsdata_zns.lbafe[0].zsze == ZONE_SIZE);
	nsdata_zns.ozcs.read_across_zone_boundaries = 0;
	nsdata_zns.mar = 0;
	nsdata_zns.mor = 0;
	nsdata_zns.lbafe[0].zsze = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata_zns, sizeof(nsdata_zns)));

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* Valid NVM NSID 2 with DIF type 1 */
	bdev[1].dif_type = SPDK_DIF_TYPE1;
	cmd.nsid = 2;
	memset(&nsdata_nvm, 0xFF, sizeof(nsdata_nvm));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata_nvm, sizeof(nsdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata_nvm.lbstm == 0);
	CU_ASSERT(nsdata_nvm.pic._16bpists == 0);
	CU_ASSERT(nsdata_nvm.pic._16bpistm == 1);
	CU_ASSERT(nsdata_nvm.pic.stcrs == 0);
	CU_ASSERT(nsdata_nvm.elbaf[0].sts == 16);
	CU_ASSERT(nsdata_nvm.elbaf[0].pif == SPDK_DIF_PI_FORMAT_32);
	nsdata_nvm.pic._16bpistm = 0;
	nsdata_nvm.elbaf[0].sts = 0;
	nsdata_nvm.elbaf[0].pif = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata_nvm, sizeof(nsdata_nvm)));

	/* Invalid NVM NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata_nvm, 0xFF, sizeof(nsdata_nvm));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata_nvm, sizeof(nsdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata_nvm, sizeof(nsdata_nvm)));

	spdk_bit_array_free(&ctrlr.visible_ns);
}

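/*
 * Set/Get Features: reservation persistence (PTPL), temperature threshold
 * TMPSEL/THSEL validation (reserved selector values must be rejected), and
 * error-recovery DULBE handling (not supported, so enabling it fails).
 */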
1315 static void
1316 test_set_get_features(void)
1317 {
1318 	struct spdk_nvmf_subsystem subsystem = {};
1319 	struct spdk_nvmf_qpair admin_qpair = {};
1320 	enum spdk_nvme_ana_state ana_state[3];
1321 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1322 	struct spdk_nvmf_ctrlr ctrlr = {
1323 		.subsys = &subsystem,
1324 		.admin_qpair = &admin_qpair,
1325 		.listener = &listener
1326 	};
1327 	union nvmf_h2c_msg cmd = {};
1328 	union nvmf_c2h_msg rsp = {};
1329 	struct spdk_nvmf_ns ns[3];
1330 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
1331 	struct spdk_nvmf_request req;
1332 	int rc;
1333 
1334 	ctrlr.visible_ns = spdk_bit_array_create(3);
1335 	spdk_bit_array_set(ctrlr.visible_ns, 0);
1336 	spdk_bit_array_set(ctrlr.visible_ns, 2);
1337 	ns[0].anagrpid = 1;
1338 	ns[2].anagrpid = 3;
1339 	subsystem.ns = ns_arr;
1340 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
1341 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1342 	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1343 	admin_qpair.ctrlr = &ctrlr;
1344 	req.qpair = &admin_qpair;
1345 	cmd.nvme_cmd.nsid = 1;
1346 	req.cmd = &cmd;
1347 	req.rsp = &rsp;
1348 
1349 	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1350 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1351 	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
1352 	ns[0].ptpl_file = "testcfg";
1353 	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
1354 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1355 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1356 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
1357 	CU_ASSERT(ns[0].ptpl_activated == true);
1358 
1359 	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1360 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1361 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
1362 	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
1363 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1364 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1365 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1366 	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
1367 
1368 
1369 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1370 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1371 	cmd.nvme_cmd.cdw11 = 0x42;
1372 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1373 
1374 	rc = nvmf_ctrlr_get_features(&req);
1375 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1376 
1377 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1378 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1379 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1380 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1381 
1382 	rc = nvmf_ctrlr_get_features(&req);
1383 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1384 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1385 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1386 
1387 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1388 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1389 	cmd.nvme_cmd.cdw11 = 0x42;
1390 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1391 
1392 	rc = nvmf_ctrlr_set_features(&req);
1393 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1394 
1395 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1396 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1397 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1398 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1399 
1400 	rc = nvmf_ctrlr_set_features(&req);
1401 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1402 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1403 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1404 
1405 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
1406 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1407 	cmd.nvme_cmd.cdw11 = 0x42;
1408 	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
1409 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1410 
1411 	rc = nvmf_ctrlr_set_features(&req);
1412 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1413 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1414 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1415 
1417 	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
1418 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1419 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1420 
1421 	rc = nvmf_ctrlr_get_features(&req);
1422 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1423 
1424 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
1425 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1426 	cmd.nvme_cmd.cdw11 = 0x42;
1427 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
1428 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1429 
1430 	rc = nvmf_ctrlr_set_features(&req);
1431 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1432 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1433 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1434 
1435 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
1436 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1437 	cmd.nvme_cmd.cdw11 = 0x42;
1438 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
1439 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1440 
1441 	rc = nvmf_ctrlr_set_features(&req);
1442 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1443 
1444 	spdk_bit_array_free(&ctrlr.visible_ns);
1445 }
1446 
1447 /*
1448  * Reservation Unit Test Configuration
1449  *       --------             --------    --------
1450  *      | Host A |           | Host B |  | Host C |
1451  *       --------             --------    --------
1452  *      /        \               |           |
1453  *  --------   --------       -------     -------
1454  * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
1455  *  --------   --------       -------     -------
1456  *    \           \              /           /
1457  *     \           \            /           /
1458  *      \           \          /           /
1459  *      --------------------------------------
1460  *     |            NAMESPACE 1               |
1461  *      --------------------------------------
1462  */
1463 
1464 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
1465 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
1466 
1467 static void
1468 ut_reservation_init(enum spdk_nvme_reservation_type rtype)
1469 {
1470 	/* Host A has two controllers */
1471 	spdk_uuid_generate(&g_ctrlr1_A.hostid);
1472 	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
1473 
1474 	/* Host B has 1 controller */
1475 	spdk_uuid_generate(&g_ctrlr_B.hostid);
1476 
1477 	/* Host C has 1 controller */
1478 	spdk_uuid_generate(&g_ctrlr_C.hostid);
1479 
1480 	memset(&g_ns_info, 0, sizeof(g_ns_info));
1481 	g_ns_info.rtype = rtype;
1482 	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
1483 	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
1484 	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
1485 }
1486 
1487 static void
1488 test_reservation_write_exclusive(void)
1489 {
1490 	struct spdk_nvmf_request req = {};
1491 	union nvmf_h2c_msg cmd = {};
1492 	union nvmf_c2h_msg rsp = {};
1493 	int rc;
1494 
1495 	req.cmd = &cmd;
1496 	req.rsp = &rsp;
1497 
1498 	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
1499 	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1500 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
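	/* Write Exclusive semantics: only the reservation holder may issue
	 * media-modifying commands, while reads are allowed from any host,
	 * registrant or not, per the NVMe base spec.
	 */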
1501 
1502 	/* Test Case: Issue a Read command from Host A and Host B */
1503 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1504 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1505 	SPDK_CU_ASSERT_FATAL(rc == 0);
1506 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1507 	SPDK_CU_ASSERT_FATAL(rc == 0);
1508 
1509 	/* Test Case: Issue a DSM Write command from Host A and Host B */
1510 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1511 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1512 	SPDK_CU_ASSERT_FATAL(rc == 0);
1513 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1514 	SPDK_CU_ASSERT_FATAL(rc < 0);
1515 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1516 
1517 	/* Test Case: Issue a Write command from Host C */
1518 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1519 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1520 	SPDK_CU_ASSERT_FATAL(rc < 0);
1521 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1522 
1523 	/* Test Case: Issue a Read command from Host B */
1524 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1525 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1526 	SPDK_CU_ASSERT_FATAL(rc == 0);
1527 
1528 	/* Unregister Host C */
1529 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1530 
1531 	/* Test Case: Read and Write commands from non-registrant Host C */
1532 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1533 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1534 	SPDK_CU_ASSERT_FATAL(rc < 0);
1535 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1536 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1537 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1538 	SPDK_CU_ASSERT_FATAL(rc == 0);
1539 }
1540 
1541 static void
1542 test_reservation_exclusive_access(void)
1543 {
1544 	struct spdk_nvmf_request req = {};
1545 	union nvmf_h2c_msg cmd = {};
1546 	union nvmf_c2h_msg rsp = {};
1547 	int rc;
1548 
1549 	req.cmd = &cmd;
1550 	req.rsp = &rsp;
1551 
1552 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1553 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1554 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
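	/* Exclusive Access semantics: non-holders may neither read nor
	 * write, but reservation management commands such as Release from a
	 * registrant are not blocked by the conflict check.
	 */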
1555 
1556 	/* Test Case: Issue a Read command from Host B */
1557 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1558 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1559 	SPDK_CU_ASSERT_FATAL(rc < 0);
1560 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1561 
1562 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1563 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1564 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1565 	SPDK_CU_ASSERT_FATAL(rc == 0);
1566 }
1567 
1568 static void
1569 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1570 {
1571 	struct spdk_nvmf_request req = {};
1572 	union nvmf_h2c_msg cmd = {};
1573 	union nvmf_c2h_msg rsp = {};
1574 	int rc;
1575 
1576 	req.cmd = &cmd;
1577 	req.rsp = &rsp;
1578 
1579 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1580 	ut_reservation_init(rtype);
1581 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
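	/* In the registrants-only and all-registrants variants, every
	 * registrant has write access, not just the holder; dropping a
	 * host's registration demotes it to read-only access.
	 */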
1582 
1583 	/* Test Case: Issue a Read command from Host A and Host C */
1584 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1585 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1586 	SPDK_CU_ASSERT_FATAL(rc == 0);
1587 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1588 	SPDK_CU_ASSERT_FATAL(rc == 0);
1589 
1590 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1591 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1592 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1593 	SPDK_CU_ASSERT_FATAL(rc == 0);
1594 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1595 	SPDK_CU_ASSERT_FATAL(rc == 0);
1596 
1597 	/* Unregister Host C */
1598 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1599 
1600 	/* Test Case: Read and Write commands from non-registrant Host C */
1601 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1602 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1603 	SPDK_CU_ASSERT_FATAL(rc == 0);
1604 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1605 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1606 	SPDK_CU_ASSERT_FATAL(rc < 0);
1607 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1608 }
1609 
1610 static void
1611 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1612 {
1613 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1614 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1615 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1616 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1617 }
1618 
1619 static void
1620 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1621 {
1622 	struct spdk_nvmf_request req = {};
1623 	union nvmf_h2c_msg cmd = {};
1624 	union nvmf_c2h_msg rsp = {};
1625 	int rc;
1626 
1627 	req.cmd = &cmd;
1628 	req.rsp = &rsp;
1629 
1630 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1631 	ut_reservation_init(rtype);
1632 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
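	/* In the registrants-only and all-registrants variants of Exclusive
	 * Access, registrants keep full access; an unregistered host loses
	 * both read and write access.
	 */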
1633 
1634 	/* Test Case: Issue a Write command from Host B */
1635 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1636 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1637 	SPDK_CU_ASSERT_FATAL(rc == 0);
1638 
1639 	/* Unregister Host B */
1640 	spdk_uuid_set_null(&g_ns_info.reg_hostid[1]);
1641 
1642 	/* Test Case: Issue a Read command from Host B */
1643 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1644 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1645 	SPDK_CU_ASSERT_FATAL(rc < 0);
1646 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1647 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1648 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1649 	SPDK_CU_ASSERT_FATAL(rc < 0);
1650 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1651 }
1652 
1653 static void
1654 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1655 {
1656 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1657 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1658 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1659 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1660 }
1661 
1662 static void
1663 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1664 {
1665 	STAILQ_INIT(&ctrlr->async_events);
1666 }
1667 
1668 static void
1669 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1670 {
1671 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1672 
1673 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1674 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1675 		free(event);
1676 	}
1677 }
1678 
1679 static int
1680 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1681 {
1682 	int num = 0;
1683 	struct spdk_nvmf_async_event_completion *event;
1684 
1685 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1686 		num++;
1687 	}
1688 	return num;
1689 }
1690 
1691 static void
1692 test_reservation_notification_log_page(void)
1693 {
1694 	struct spdk_nvmf_ctrlr ctrlr;
1695 	struct spdk_nvmf_qpair qpair;
1696 	struct spdk_nvmf_ns ns;
1697 	struct spdk_nvmf_request req = {};
1698 	union nvmf_h2c_msg cmd = {};
1699 	union nvmf_c2h_msg rsp = {};
1700 	union spdk_nvme_async_event_completion event = {};
1701 	struct spdk_nvme_reservation_notification_log logs[3];
1702 	struct iovec iov;
1703 
1704 	memset(&ctrlr, 0, sizeof(ctrlr));
1705 	ctrlr.thread = spdk_get_thread();
1706 	TAILQ_INIT(&ctrlr.log_head);
1707 	init_pending_async_events(&ctrlr);
1708 	ns.nsid = 1;
1709 
1710 	/* Test Case: Mask all the reservation notifications */
1711 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1712 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1713 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1714 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1715 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1716 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1717 					  SPDK_NVME_RESERVATION_RELEASED);
1718 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1719 					  SPDK_NVME_RESERVATION_PREEMPTED);
1720 	poll_threads();
1721 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1722 
1723 	/* Test Case: Unmask all the reservation notifications,
1724 	 * 3 log pages are generated, and an AER is triggered.
1725 	 */
1726 	ns.mask = 0;
1727 	ctrlr.num_avail_log_pages = 0;
1728 	req.cmd = &cmd;
1729 	req.rsp = &rsp;
1730 	ctrlr.aer_req[0] = &req;
1731 	ctrlr.nr_aer_reqs = 1;
1732 	req.qpair = &qpair;
1733 	TAILQ_INIT(&qpair.outstanding);
1734 	qpair.ctrlr = NULL;
1735 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
1736 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1737 
1738 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1739 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1740 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1741 					  SPDK_NVME_RESERVATION_RELEASED);
1742 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1743 					  SPDK_NVME_RESERVATION_PREEMPTED);
1744 	poll_threads();
1745 	event.raw = rsp.nvme_cpl.cdw0;
1746 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1747 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1748 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1749 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1750 
1751 	/* Test Case: Get Log Page to clear the log pages */
1752 	iov.iov_base = &logs[0];
1753 	iov.iov_len = sizeof(logs);
1754 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1755 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1756 
1757 	cleanup_pending_async_events(&ctrlr);
1758 }
1759 
1760 static void
1761 test_get_dif_ctx(void)
1762 {
1763 	struct spdk_nvmf_subsystem subsystem = {};
1764 	struct spdk_nvmf_request req = {};
1765 	struct spdk_nvmf_qpair qpair = {};
1766 	struct spdk_nvmf_ctrlr ctrlr = {};
1767 	struct spdk_nvmf_ns ns = {};
1768 	struct spdk_nvmf_ns *_ns = NULL;
1769 	struct spdk_bdev bdev = {};
1770 	union nvmf_h2c_msg cmd = {};
1771 	struct spdk_dif_ctx dif_ctx = {};
1772 	bool ret;
1773 
1774 	ctrlr.subsys = &subsystem;
1775 	ctrlr.visible_ns = spdk_bit_array_create(1);
1776 	spdk_bit_array_set(ctrlr.visible_ns, 0);
1777 
1778 	qpair.ctrlr = &ctrlr;
1779 
1780 	req.qpair = &qpair;
1781 	req.cmd = &cmd;
1782 
1783 	ns.bdev = &bdev;
1784 
1785 	ctrlr.dif_insert_or_strip = false;
1786 
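	/* Each negative case below fails a different gating check in
	 * spdk_nvmf_request_get_dif_ctx(): DIF insert/strip disabled,
	 * uninitialized qpair, fabrics opcode, admin queue (qid 0), invalid
	 * nsid, missing namespace, and a non-read/write opcode. Only an
	 * I/O-queue READ/WRITE on an existing namespace yields a DIF context.
	 */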
1787 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1788 	CU_ASSERT(ret == false);
1789 
1790 	ctrlr.dif_insert_or_strip = true;
1791 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1792 
1793 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1794 	CU_ASSERT(ret == false);
1795 
1796 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
1797 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1798 
1799 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1800 	CU_ASSERT(ret == false);
1801 
1802 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1803 
1804 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1805 	CU_ASSERT(ret == false);
1806 
1807 	qpair.qid = 1;
1808 
1809 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1810 	CU_ASSERT(ret == false);
1811 
1812 	cmd.nvme_cmd.nsid = 1;
1813 
1814 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1815 	CU_ASSERT(ret == false);
1816 
1817 	subsystem.max_nsid = 1;
1818 	subsystem.ns = &_ns;
1819 	subsystem.ns[0] = &ns;
1820 
1821 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1822 	CU_ASSERT(ret == false);
1823 
1824 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1825 
1826 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1827 	CU_ASSERT(ret == true);
1828 
1829 	spdk_bit_array_free(&ctrlr.visible_ns);
1830 }
1831 
1832 static void
1833 test_identify_ctrlr(void)
1834 {
1835 	struct spdk_nvmf_tgt tgt = {};
1836 	struct spdk_nvmf_subsystem subsystem = {
1837 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1838 		.tgt = &tgt,
1839 	};
1840 	struct spdk_nvmf_transport_ops tops = {};
1841 	struct spdk_nvmf_transport transport = {
1842 		.ops = &tops,
1843 		.opts = {
1844 			.in_capsule_data_size = 4096,
1845 		},
1846 	};
1847 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1848 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1849 	struct spdk_nvme_ctrlr_data cdata = {};
1850 	uint32_t expected_ioccsz;
1851 
1852 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1853 
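	/* IOCCSZ is expressed in 16-byte units: a 64-byte SQE contributes 4
	 * and the 4096-byte in-capsule data size contributes 256, so the
	 * expected value for these capsule-based transports is 260.
	 */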
1854 	/* Check ioccsz, TCP transport */
1855 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1856 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1857 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1858 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1859 
1860 	/* Check ioccsz, RDMA transport */
1861 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1862 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1863 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1864 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1865 
1866 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1867 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1868 	ctrlr.dif_insert_or_strip = true;
1869 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1870 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1871 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1872 }
1873 
1874 static void
1875 test_identify_ctrlr_iocs_specific(void)
1876 {
1877 	struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 };
1878 	struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 };
1879 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop };
1880 	struct spdk_nvme_cmd cmd = {};
1881 	struct spdk_nvme_cpl rsp = {};
1882 	struct spdk_nvme_zns_ctrlr_data ctrlr_data = {};
1883 	struct spdk_nvme_nvm_ctrlr_data cdata_nvm = {};
1884 
1885 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;
1886 
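	/* The expected ZASL values below are consistent with
	 * zasl = log2(max_zone_append_size_kib >> (12 + CAP.MPSMIN)),
	 * e.g. 60000 >> 12 = 14 -> log2(14) = 3, and with mpsmin = 2,
	 * 60000 >> 14 = 3 -> log2(3) = 1. A ZASL of 0 means no separate
	 * zone append limit is reported.
	 */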
1887 	/* ZNS max_zone_append_size_kib no limit */
1888 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1889 	memset(&rsp, 0, sizeof(rsp));
1890 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1891 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1892 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1893 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1894 	CU_ASSERT(ctrlr_data.zasl == 0);
1895 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1896 
1897 	/* ZNS max_zone_append_size_kib = 4096 */
1898 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1899 	memset(&rsp, 0, sizeof(rsp));
1900 	subsystem.max_zone_append_size_kib = 4096;
1901 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1902 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1903 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1904 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1905 	CU_ASSERT(ctrlr_data.zasl == 0);
1906 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1907 
1908 	/* ZNS max_zone_append_size_kib = 60000 */
1909 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1910 	memset(&rsp, 0, sizeof(rsp));
1911 	subsystem.max_zone_append_size_kib = 60000;
1912 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1913 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1914 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1915 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1916 	CU_ASSERT(ctrlr_data.zasl == 3);
1917 	ctrlr_data.zasl = 0;
1918 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1919 
1920 	/* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */
1921 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1922 	memset(&rsp, 0, sizeof(rsp));
1923 	ctrlr.vcprop.cap.bits.mpsmin = 2;
1924 	subsystem.max_zone_append_size_kib = 60000;
1925 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1926 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1927 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1928 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1929 	CU_ASSERT(ctrlr_data.zasl == 1);
1930 	ctrlr_data.zasl = 0;
1931 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1932 	ctrlr.vcprop.cap.bits.mpsmin = 0;
1933 
1934 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;
1935 
1936 	/* NVM max_discard_size_kib = 1024;
1937 	 * max_write_zeroes_size_kib = 1024;
1938 	 * mpsmin = 0;
1939 	 */
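	/* A plausible reading of the asserts below: wzsl = log2(1024 KiB /
	 * 4 KiB page) = 8, dmrsl = 1024 KiB expressed in 512-byte blocks =
	 * 2048, and dmrl = 1 supported discard range.
	 */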
1940 	memset(&cdata_nvm, 0xFF, sizeof(cdata_nvm));
1941 	memset(&rsp, 0, sizeof(rsp));
1942 	subsystem.max_discard_size_kib = (uint64_t)1024;
1943 	subsystem.max_write_zeroes_size_kib = (uint64_t)1024;
1944 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1945 			&cdata_nvm, sizeof(cdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1946 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1947 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1948 	CU_ASSERT(cdata_nvm.wzsl == 8);
1949 	CU_ASSERT(cdata_nvm.dmrsl == 2048);
1950 	CU_ASSERT(cdata_nvm.dmrl == 1);
1951 }
1952 
1953 static int
1954 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1955 {
1956 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1957 
1958 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1959 }
1960 
1961 static void
1962 test_custom_admin_cmd(void)
1963 {
1964 	struct spdk_nvmf_subsystem subsystem;
1965 	struct spdk_nvmf_qpair qpair;
1966 	struct spdk_nvmf_ctrlr ctrlr;
1967 	struct spdk_nvmf_request req;
1968 	struct spdk_nvmf_ns *ns_ptrs[1];
1969 	struct spdk_nvmf_ns ns;
1970 	union nvmf_h2c_msg cmd;
1971 	union nvmf_c2h_msg rsp;
1972 	struct spdk_bdev bdev;
1973 	uint8_t buf[4096];
1974 	int rc;
1975 
1976 	memset(&subsystem, 0, sizeof(subsystem));
1977 	ns_ptrs[0] = &ns;
1978 	subsystem.ns = ns_ptrs;
1979 	subsystem.max_nsid = 1;
1980 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1981 
1982 	memset(&ns, 0, sizeof(ns));
1983 	ns.opts.nsid = 1;
1984 	ns.bdev = &bdev;
1985 
1986 	memset(&qpair, 0, sizeof(qpair));
1987 	qpair.ctrlr = &ctrlr;
1988 
1989 	memset(&ctrlr, 0, sizeof(ctrlr));
1990 	ctrlr.subsys = &subsystem;
1991 	ctrlr.vcprop.cc.bits.en = 1;
1992 	ctrlr.thread = spdk_get_thread();
1993 
1994 	memset(&req, 0, sizeof(req));
1995 	req.qpair = &qpair;
1996 	req.cmd = &cmd;
1997 	req.rsp = &rsp;
1998 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1999 	req.length = sizeof(buf);
2000 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);
2001 
2002 	memset(&cmd, 0, sizeof(cmd));
2003 	cmd.nvme_cmd.opc = 0xc1;
2004 	cmd.nvme_cmd.nsid = 0;
2005 	memset(&rsp, 0, sizeof(rsp));
2006 
2007 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
2008 
2009 	/* Ensure that our handler is called */
2010 	rc = nvmf_ctrlr_process_admin_cmd(&req);
2011 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2012 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2013 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2014 }
2015 
2016 static void
2017 test_fused_compare_and_write(void)
2018 {
2019 	struct spdk_nvmf_request req = {};
2020 	struct spdk_nvmf_qpair qpair = {};
2021 	struct spdk_nvme_cmd cmd = {};
2022 	union nvmf_c2h_msg rsp = {};
2023 	struct spdk_nvmf_ctrlr ctrlr = {};
2024 	struct spdk_nvmf_subsystem subsystem = {};
2025 	struct spdk_nvmf_ns ns = {};
2026 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2027 	enum spdk_nvme_ana_state ana_state[1];
2028 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2029 	struct spdk_bdev bdev = {};
2030 
2031 	struct spdk_nvmf_poll_group group = {};
2032 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2033 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2034 	struct spdk_io_channel io_ch = {};
2035 
2036 	ns.bdev = &bdev;
2037 	ns.anagrpid = 1;
2038 
2039 	subsystem.id = 0;
2040 	subsystem.max_nsid = 1;
2041 	subsys_ns[0] = &ns;
2042 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2043 
2044 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2045 
2046 	/* Enable controller */
2047 	ctrlr.vcprop.cc.bits.en = 1;
2048 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2049 	ctrlr.listener = &listener;
2050 	ctrlr.visible_ns = spdk_bit_array_create(1);
2051 	spdk_bit_array_set(ctrlr.visible_ns, 0);
2052 
2053 	group.num_sgroups = 1;
2054 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2055 	sgroups.num_ns = 1;
2056 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2057 	ns_info.channel = &io_ch;
2058 	sgroups.ns_info = &ns_info;
2059 	TAILQ_INIT(&sgroups.queued);
2060 	group.sgroups = &sgroups;
2061 	TAILQ_INIT(&qpair.outstanding);
2062 
2063 	qpair.ctrlr = &ctrlr;
2064 	qpair.group = &group;
2065 	qpair.qid = 1;
2066 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
2067 
2068 	cmd.nsid = 1;
2069 
2070 	req.qpair = &qpair;
2071 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2072 	req.rsp = &rsp;
2073 
2074 	/* SUCCESS/SUCCESS */
2075 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
2076 	cmd.opc = SPDK_NVME_OPC_COMPARE;
2077 
2078 	spdk_nvmf_request_exec(&req);
2079 	CU_ASSERT(qpair.first_fused_req != NULL);
2080 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2081 
2082 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2083 	cmd.opc = SPDK_NVME_OPC_WRITE;
2084 
2085 	spdk_nvmf_request_exec(&req);
2086 	CU_ASSERT(qpair.first_fused_req == NULL);
2087 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2088 
2089 	/* Wrong sequence */
2090 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2091 	cmd.opc = SPDK_NVME_OPC_WRITE;
2092 
2093 	spdk_nvmf_request_exec(&req);
2094 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
2095 	CU_ASSERT(qpair.first_fused_req == NULL);
2096 
2097 	/* Write as FUSE_FIRST (Wrong op code) */
2098 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
2099 	cmd.opc = SPDK_NVME_OPC_WRITE;
2100 
2101 	spdk_nvmf_request_exec(&req);
2102 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
2103 	CU_ASSERT(qpair.first_fused_req == NULL);
2104 
2105 	/* Valid FUSE_FIRST Compare, then Compare as FUSE_SECOND (wrong op code) */
2106 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
2107 	cmd.opc = SPDK_NVME_OPC_COMPARE;
2108 
2109 	spdk_nvmf_request_exec(&req);
2110 	CU_ASSERT(qpair.first_fused_req != NULL);
2111 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2112 
2113 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2114 	cmd.opc = SPDK_NVME_OPC_COMPARE;
2115 
2116 	spdk_nvmf_request_exec(&req);
2117 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
2118 	CU_ASSERT(qpair.first_fused_req == NULL);
2119 
2120 	spdk_bit_array_free(&ctrlr.visible_ns);
2121 }
2122 
2123 static void
2124 test_multi_async_event_reqs(void)
2125 {
2126 	struct spdk_nvmf_subsystem subsystem = {};
2127 	struct spdk_nvmf_qpair qpair = {};
2128 	struct spdk_nvmf_ctrlr ctrlr = {};
2129 	struct spdk_nvmf_request req[5] = {};
2130 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2131 	struct spdk_nvmf_ns ns = {};
2132 	union nvmf_h2c_msg cmd[5] = {};
2133 	union nvmf_c2h_msg rsp[5] = {};
2134 
2135 	struct spdk_nvmf_poll_group group = {};
2136 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2137 
2138 	int i;
2139 
2140 	ns_ptrs[0] = &ns;
2141 	subsystem.ns = ns_ptrs;
2142 	subsystem.max_nsid = 1;
2143 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2144 
2145 	ns.opts.nsid = 1;
2146 	group.sgroups = &sgroups;
2147 
2148 	qpair.ctrlr = &ctrlr;
2149 	qpair.group = &group;
2150 	TAILQ_INIT(&qpair.outstanding);
2151 
2152 	ctrlr.subsys = &subsystem;
2153 	ctrlr.vcprop.cc.bits.en = 1;
2154 	ctrlr.thread = spdk_get_thread();
2155 
2156 	for (i = 0; i < 5; i++) {
2157 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2158 		cmd[i].nvme_cmd.nsid = 0;
2159 		cmd[i].nvme_cmd.cid = i;
2160 
2161 		req[i].qpair = &qpair;
2162 		req[i].cmd = &cmd[i];
2163 		req[i].rsp = &rsp[i];
2164 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2165 	}
2166 
2167 	/* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */
2168 	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
2169 	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
2170 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2171 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
2172 	}
2173 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2174 
2175 	/* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */
2176 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2177 	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
2178 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2179 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
2180 
2181 	/* Test that the aer_req array stays contiguous when a request in the middle is aborted */
2182 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
2183 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2184 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2185 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
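	/* Aborting cid 2 removed req[2]; the last outstanding AER (req[3])
	 * was moved into the vacated slot so the array stays dense.
	 */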
2186 
2187 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
2188 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2189 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2190 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
2191 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
2192 
2193 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
2194 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
2195 }
2196 
2197 static void
2198 test_get_ana_log_page_one_ns_per_anagrp(void)
2199 {
2200 #define UT_ANA_DESC_MAX_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
2201 #define UT_ANA_LOG_PAGE_MAX_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_MAX_SIZE)
2202 #define UT_ANA_DESC_SIZE(rgo) (sizeof(struct spdk_nvme_ana_group_descriptor) + (rgo ? 0 : sizeof(uint32_t)))
2203 #define UT_ANA_LOG_PAGE_SIZE(rgo) (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE(rgo))
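	/* ANA log layout: a fixed header followed by one group descriptor
	 * per ANA group, each trailed by one 4-byte NSID entry per
	 * namespace. RGO (Return Groups Only) requests the descriptors
	 * without the NSID lists, which is what the rgo loop below covers.
	 */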
2204 	uint32_t ana_group[3];
2205 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
2206 	struct spdk_nvmf_ctrlr ctrlr = {};
2207 	enum spdk_nvme_ana_state ana_state[3];
2208 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2209 	struct spdk_nvmf_ns ns[3];
2210 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
2211 	uint64_t offset;
2212 	uint32_t length;
2213 	int i;
2214 	char expected_page[UT_ANA_LOG_PAGE_MAX_SIZE] = {0};
2215 	char actual_page[UT_ANA_LOG_PAGE_MAX_SIZE] = {0};
2216 	struct iovec iov, iovs[2];
2217 	struct spdk_nvme_ana_page *ana_hdr;
2218 	char _ana_desc[UT_ANA_DESC_MAX_SIZE];
2219 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2220 	uint32_t rgo;
2221 
2222 	subsystem.ns = ns_arr;
2223 	subsystem.max_nsid = 3;
2224 	for (i = 0; i < 3; i++) {
2225 		subsystem.ana_group[i] = 1;
2226 	}
2227 	ctrlr.subsys = &subsystem;
2228 	ctrlr.listener = &listener;
2229 
2230 	for (i = 0; i < 3; i++) {
2231 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2232 	}
2233 
2234 	for (i = 0; i < 3; i++) {
2235 		ns_arr[i]->nsid = i + 1;
2236 		ns_arr[i]->anagrpid = i + 1;
2237 	}
2238 
2239 	for (rgo = 0; rgo <= 1; rgo++) {
2240 		memset(expected_page, 0, sizeof(expected_page));
2241 		memset(actual_page, 0, sizeof(actual_page));
2242 
2243 		/* create expected page */
2244 		ana_hdr = (void *)&expected_page[0];
2245 		ana_hdr->num_ana_group_desc = 3;
2246 		ana_hdr->change_count = 0;
2247 
2248 		/* The descriptor may be unaligned, so build it in a local buffer and copy it into place. */
2249 		ana_desc = (void *)_ana_desc;
2250 		offset = sizeof(struct spdk_nvme_ana_page);
2251 
2252 		for (i = 0; i < 3; i++) {
2253 			memset(ana_desc, 0, UT_ANA_DESC_MAX_SIZE);
2254 			ana_desc->ana_group_id = ns_arr[i]->nsid;
2255 			ana_desc->num_of_nsid = rgo ? 0 : 1;
2256 			ana_desc->change_count = 0;
2257 			ana_desc->ana_state = ctrlr.listener->ana_state[i];
2258 			if (!rgo) {
2259 				ana_desc->nsid[0] = ns_arr[i]->nsid;
2260 			}
2261 			memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE(rgo));
2262 			offset += UT_ANA_DESC_SIZE(rgo);
2263 		}
2264 
2265 		/* read entire actual log page */
2266 		offset = 0;
2267 		while (offset < UT_ANA_LOG_PAGE_MAX_SIZE) {
2268 			length = spdk_min(16, UT_ANA_LOG_PAGE_MAX_SIZE - offset);
2269 			iov.iov_base = &actual_page[offset];
2270 			iov.iov_len = length;
2271 			nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0, rgo);
2272 			offset += length;
2273 		}
2274 
2275 		/* compare expected page and actual page */
2276 		CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_MAX_SIZE) == 0);
2277 
2278 		memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_MAX_SIZE);
2279 		offset = 0;
2280 		iovs[0].iov_base = &actual_page[offset];
2281 		iovs[0].iov_len = UT_ANA_LOG_PAGE_MAX_SIZE - UT_ANA_DESC_MAX_SIZE + 4;
2282 		offset += UT_ANA_LOG_PAGE_MAX_SIZE - UT_ANA_DESC_MAX_SIZE + 4;
2283 		iovs[1].iov_base = &actual_page[offset];
2284 		iovs[1].iov_len = UT_ANA_LOG_PAGE_MAX_SIZE - offset;
2285 		nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_MAX_SIZE, 0, rgo);
2286 
2287 		CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_MAX_SIZE) == 0);
2288 	}
2289 
2290 #undef UT_ANA_DESC_SIZE
2291 #undef UT_ANA_LOG_PAGE_SIZE
2292 #undef UT_ANA_DESC_MAX_SIZE
2293 #undef UT_ANA_LOG_PAGE_MAX_SIZE
2294 }
2295 
2296 static void
2297 test_get_ana_log_page_multi_ns_per_anagrp(void)
2298 {
2299 #define UT_ANA_LOG_PAGE_SIZE(rgo)	(sizeof(struct spdk_nvme_ana_page) +	\
2300 					 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
2301 					 (rgo ? 0 : (sizeof(uint32_t) * 5)))
2302 #define UT_ANA_LOG_PAGE_MAX_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
2303 					 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
2304 					 sizeof(uint32_t) * 5)
2305 	struct spdk_nvmf_ns ns[5];
2306 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
2307 	uint32_t ana_group[5] = {0};
2308 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
2309 	enum spdk_nvme_ana_state ana_state[5];
2310 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
2311 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
2312 	char expected_page[UT_ANA_LOG_PAGE_MAX_SIZE] = {0};
2313 	char actual_page[UT_ANA_LOG_PAGE_MAX_SIZE] = {0};
2314 	struct iovec iov, iovs[2];
2315 	struct spdk_nvme_ana_page *ana_hdr;
2316 	char _ana_desc[UT_ANA_LOG_PAGE_MAX_SIZE];
2317 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2318 	uint64_t offset;
2319 	uint32_t length;
2320 	int i;
2321 	uint32_t rgo;
2322 
2323 	subsystem.max_nsid = 5;
2324 	subsystem.ana_group[1] = 3;
2325 	subsystem.ana_group[2] = 2;
2326 	for (i = 0; i < 5; i++) {
2327 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2328 	}
2329 
2330 	for (i = 0; i < 5; i++) {
2331 		ns_arr[i]->nsid = i + 1;
2332 	}
2333 	ns_arr[0]->anagrpid = 2;
2334 	ns_arr[1]->anagrpid = 3;
2335 	ns_arr[2]->anagrpid = 2;
2336 	ns_arr[3]->anagrpid = 3;
2337 	ns_arr[4]->anagrpid = 2;
2338 
2339 	for (rgo = 0; rgo <= 1; rgo++) {
2340 		memset(expected_page, 0, sizeof(expected_page));
2341 		memset(actual_page, 0, sizeof(actual_page));
2342 
2343 		/* create expected page */
2344 		ana_hdr = (void *)&expected_page[0];
2345 		ana_hdr->num_ana_group_desc = 2;
2346 		ana_hdr->change_count = 0;
2347 
2348 		/* The descriptor may be unaligned, so build it in a local buffer and copy it into place. */
2349 		ana_desc = (void *)_ana_desc;
2350 		offset = sizeof(struct spdk_nvme_ana_page);
2351 
2352 		memset(_ana_desc, 0, sizeof(_ana_desc));
2353 		ana_desc->ana_group_id = 2;
2354 		ana_desc->num_of_nsid = rgo ? 0 : 3;
2355 		ana_desc->change_count = 0;
2356 		ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2357 		if (!rgo) {
2358 			ana_desc->nsid[0] = 1;
2359 			ana_desc->nsid[1] = 3;
2360 			ana_desc->nsid[2] = 5;
2361 		}
2362 		memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2363 		       (rgo ? 0 : (sizeof(uint32_t) * 3)));
2364 		offset += sizeof(struct spdk_nvme_ana_group_descriptor) + (rgo ? 0 : (sizeof(uint32_t) * 3));
2365 
2366 		memset(_ana_desc, 0, sizeof(_ana_desc));
2367 		ana_desc->ana_group_id = 3;
2368 		ana_desc->num_of_nsid = rgo ? 0 : 2;
2369 		ana_desc->change_count = 0;
2370 		ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2371 		if (!rgo) {
2372 			ana_desc->nsid[0] = 2;
2373 			ana_desc->nsid[1] = 4;
2374 		}
2375 		memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2376 		       (rgo ? 0 : (sizeof(uint32_t) * 2)));
2377 
2378 		/* read entire actual log page, and compare expected page and actual page. */
2379 		offset = 0;
2380 		while (offset < UT_ANA_LOG_PAGE_MAX_SIZE) {
2381 			length = spdk_min(16, UT_ANA_LOG_PAGE_MAX_SIZE - offset);
2382 			iov.iov_base = &actual_page[offset];
2383 			iov.iov_len = length;
2384 			nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0, rgo);
2385 			offset += length;
2386 		}
2387 
2388 		CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_MAX_SIZE) == 0);
2389 
2390 		memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_MAX_SIZE);
2391 		offset = 0;
2392 		iovs[0].iov_base = &actual_page[offset];
2393 		iovs[0].iov_len = UT_ANA_LOG_PAGE_MAX_SIZE - sizeof(uint32_t) * 5;
2394 		offset += UT_ANA_LOG_PAGE_MAX_SIZE - sizeof(uint32_t) * 5;
2395 		iovs[1].iov_base = &actual_page[offset];
2396 		iovs[1].iov_len = sizeof(uint32_t) * 5;
2397 		nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_MAX_SIZE, 0, rgo);
2398 
2399 		CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_MAX_SIZE) == 0);
2400 	}
2401 
2402 #undef UT_ANA_LOG_PAGE_SIZE
2403 #undef UT_ANA_LOG_PAGE_MAX_SIZE
2404 }

2405 static void
2406 test_multi_async_events(void)
2407 {
2408 	struct spdk_nvmf_subsystem subsystem = {};
2409 	struct spdk_nvmf_qpair qpair = {};
2410 	struct spdk_nvmf_ctrlr ctrlr = {};
2411 	struct spdk_nvmf_request req[4] = {};
2412 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2413 	struct spdk_nvmf_ns ns = {};
2414 	union nvmf_h2c_msg cmd[4] = {};
2415 	union nvmf_c2h_msg rsp[4] = {};
2416 	union spdk_nvme_async_event_completion event = {};
2417 	struct spdk_nvmf_poll_group group = {};
2418 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2419 	int i;
2420 
2421 	ns_ptrs[0] = &ns;
2422 	subsystem.ns = ns_ptrs;
2423 	subsystem.max_nsid = 1;
2424 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2425 
2426 	ns.opts.nsid = 1;
2427 	group.sgroups = &sgroups;
2428 
2429 	qpair.ctrlr = &ctrlr;
2430 	qpair.group = &group;
2431 	TAILQ_INIT(&qpair.outstanding);
2432 
2433 	ctrlr.subsys = &subsystem;
2434 	ctrlr.vcprop.cc.bits.en = 1;
2435 	ctrlr.thread = spdk_get_thread();
2436 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2437 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2438 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2439 	init_pending_async_events(&ctrlr);
2440 
2441 	/* The target queues pending events when there is no outstanding AER request */
2442 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2443 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2444 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2445 
2446 	for (i = 0; i < 4; i++) {
2447 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2448 		cmd[i].nvme_cmd.nsid = 0;
2449 		cmd[i].nvme_cmd.cid = i;
2450 
2451 		req[i].qpair = &qpair;
2452 		req[i].cmd = &cmd[i];
2453 		req[i].rsp = &rsp[i];
2454 
2455 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2456 
2457 		sgroups.mgmt_io_outstanding = 1;
2458 		if (i < 3) {
2459 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2460 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2461 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2462 		} else {
2463 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2464 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2465 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2466 		}
2467 	}
2468 
2469 	event.raw = rsp[0].nvme_cpl.cdw0;
2470 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2471 	event.raw = rsp[1].nvme_cpl.cdw0;
2472 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2473 	event.raw = rsp[2].nvme_cpl.cdw0;
2474 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2475 
2476 	cleanup_pending_async_events(&ctrlr);
2477 }
2478 
2479 static void
2480 test_rae(void)
2481 {
2482 	struct spdk_nvmf_subsystem subsystem = {};
2483 	struct spdk_nvmf_qpair qpair = {};
2484 	struct spdk_nvmf_ctrlr ctrlr = {};
2485 	struct spdk_nvmf_request req[3] = {};
2486 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2487 	struct spdk_nvmf_ns ns = {};
2488 	union nvmf_h2c_msg cmd[3] = {};
2489 	union nvmf_c2h_msg rsp[3] = {};
2490 	union spdk_nvme_async_event_completion event = {};
2491 	struct spdk_nvmf_poll_group group = {};
2492 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2493 	int i;
2494 	char data[4096];
2495 
2496 	ns_ptrs[0] = &ns;
2497 	subsystem.ns = ns_ptrs;
2498 	subsystem.max_nsid = 1;
2499 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2500 
2501 	ns.opts.nsid = 1;
2502 	group.sgroups = &sgroups;
2503 
2504 	qpair.ctrlr = &ctrlr;
2505 	qpair.group = &group;
2506 	TAILQ_INIT(&qpair.outstanding);
2507 
2508 	ctrlr.subsys = &subsystem;
2509 	ctrlr.vcprop.cc.bits.en = 1;
2510 	ctrlr.thread = spdk_get_thread();
2511 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2512 	init_pending_async_events(&ctrlr);
2513 
2514 	/* The target queues pending events when there is no outstanding AER request */
2515 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2516 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2517 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2518 	/* Only one event is queued until RAE is cleared */
2519 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2520 
2521 	req[0].qpair = &qpair;
2522 	req[0].cmd = &cmd[0];
2523 	req[0].rsp = &rsp[0];
2524 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2525 	cmd[0].nvme_cmd.nsid = 0;
2526 	cmd[0].nvme_cmd.cid = 0;
2527 
2528 	for (i = 1; i < 3; i++) {
2529 		req[i].qpair = &qpair;
2530 		req[i].cmd = &cmd[i];
2531 		req[i].rsp = &rsp[i];
2532 		req[i].length = sizeof(data);
2533 		SPDK_IOV_ONE(req[i].iov, &req[i].iovcnt, &data, req[i].length);
2534 
2535 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2536 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2537 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2538 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2539 			spdk_nvme_bytes_to_numd(req[i].length);
2540 		cmd[i].nvme_cmd.cid = i;
2541 	}
2542 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2543 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
2544 
2545 	/* consume the pending event */
2546 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2547 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2548 	event.raw = rsp[0].nvme_cpl.cdw0;
2549 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2550 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2551 
2552 	/* get log with RAE set */
2553 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2554 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2555 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2556 
2557 	/* No new event is generated until RAE is cleared */
2558 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2559 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2560 
2561 	/* get log with RAE clear */
2562 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2563 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2564 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2565 
2566 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2567 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2568 
2569 	cleanup_pending_async_events(&ctrlr);
2570 }
2571 
2572 static void
2573 test_nvmf_ctrlr_create_destruct(void)
2574 {
2575 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2576 	struct spdk_nvmf_poll_group group = {};
2577 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2578 	struct spdk_nvmf_transport transport = {};
2579 	struct spdk_nvmf_transport_ops tops = {};
2580 	struct spdk_nvmf_subsystem subsystem = {};
2581 	struct spdk_nvmf_ns *ns_arr[1] = { NULL };
2582 	struct spdk_nvmf_request req = {};
2583 	struct spdk_nvmf_qpair qpair = {};
2584 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2585 	struct spdk_nvmf_tgt tgt = {};
2586 	union nvmf_h2c_msg cmd = {};
2587 	union nvmf_c2h_msg rsp = {};
2588 	const uint8_t hostid[16] = {
2589 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2590 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2591 	};
2592 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2593 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2594 
2595 	group.thread = spdk_get_thread();
2596 	transport.ops = &tops;
2597 	transport.opts.max_aq_depth = 32;
2598 	transport.opts.max_queue_depth = 64;
2599 	transport.opts.max_qpairs_per_ctrlr = 3;
2600 	transport.opts.dif_insert_or_strip = true;
2601 	transport.tgt = &tgt;
2602 	qpair.transport = &transport;
2603 	qpair.group = &group;
2604 	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
2605 	TAILQ_INIT(&qpair.outstanding);
2606 
2607 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2608 	connect_data.cntlid = 0xFFFF;
2609 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2610 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2611 
2612 	subsystem.thread = spdk_get_thread();
2613 	subsystem.id = 1;
2614 	TAILQ_INIT(&subsystem.ctrlrs);
2615 	subsystem.tgt = &tgt;
2616 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2617 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2618 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2619 	subsystem.ns = ns_arr;
2620 
2621 	group.sgroups = sgroups;
2622 
2623 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2624 	cmd.connect_cmd.cid = 1;
2625 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2626 	cmd.connect_cmd.recfmt = 0;
2627 	cmd.connect_cmd.qid = 0;
2628 	cmd.connect_cmd.sqsize = 31;
2629 	cmd.connect_cmd.cattr = 0;
2630 	cmd.connect_cmd.kato = 120000;
2631 
2632 	req.qpair = &qpair;
2633 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2634 	req.length = sizeof(connect_data);
2635 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
2636 	req.cmd = &cmd;
2637 	req.rsp = &rsp;
2638 
2639 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2640 	sgroups[subsystem.id].mgmt_io_outstanding++;
2641 
2642 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base);
2643 	poll_threads();
2644 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
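	/* Defaults asserted below: CAP.MQES is zero-based, so
	 * max_queue_depth = 64 reports 63; CAP.TO is in 500 ms units; and
	 * the keep-alive feature echoes the 120000 ms kato from connect.
	 */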
2645 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2646 	CU_ASSERT(ctrlr->subsys == &subsystem);
2647 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2648 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2649 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2650 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2651 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2652 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2653 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2654 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
2655 	CU_ASSERT(memcmp(&ctrlr->hostid, hostid, 16) == 0); /* hostid is raw bytes; strncmp would stop at the leading 0x00 */
2656 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2657 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2658 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2659 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
2660 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2661 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2662 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2663 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2664 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2665 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2666 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2667 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2668 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2669 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2670 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2671 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2672 
2673 	ctrlr->in_destruct = true;
2674 	nvmf_ctrlr_destruct(ctrlr);
2675 	poll_threads();
2676 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2677 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2678 }
2679 
2680 static void
2681 test_nvmf_ctrlr_use_zcopy(void)
2682 {
2683 	struct spdk_nvmf_subsystem subsystem = {};
2684 	struct spdk_nvmf_transport transport = {};
2685 	struct spdk_nvmf_request req = {};
2686 	struct spdk_nvmf_qpair qpair = {};
2687 	struct spdk_nvmf_ctrlr ctrlr = {};
2688 	union nvmf_h2c_msg cmd = {};
2689 	struct spdk_nvmf_ns ns = {};
2690 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2691 	struct spdk_bdev bdev = {};
2692 	struct spdk_nvmf_poll_group group = {};
2693 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2694 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2695 	struct spdk_io_channel io_ch = {};
2696 	int opc;
2697 
2698 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2699 	ns.bdev = &bdev;
2700 
2701 	subsystem.id = 0;
2702 	subsystem.max_nsid = 1;
2703 	subsys_ns[0] = &ns;
2704 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2705 
2706 	ctrlr.subsys = &subsystem;
2707 	ctrlr.visible_ns = spdk_bit_array_create(1);
2708 	spdk_bit_array_set(ctrlr.visible_ns, 0);
2709 
2710 	transport.opts.zcopy = true;
2711 
2712 	qpair.ctrlr = &ctrlr;
2713 	qpair.group = &group;
2714 	qpair.qid = 1;
2715 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
2716 	qpair.transport = &transport;
2717 
2718 	group.thread = spdk_get_thread();
2719 	group.num_sgroups = 1;
2720 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2721 	sgroups.num_ns = 1;
2722 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2723 	ns_info.channel = &io_ch;
2724 	sgroups.ns_info = &ns_info;
2725 	TAILQ_INIT(&sgroups.queued);
2726 	group.sgroups = &sgroups;
2727 	TAILQ_INIT(&qpair.outstanding);
2728 
2729 	req.qpair = &qpair;
2730 	req.cmd = &cmd;
2731 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2732 
2733 	/* Admin queue */
2734 	qpair.qid = 0;
2735 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2736 	qpair.qid = 1;
2737 
2738 	/* Invalid Opcodes */
2739 	for (opc = 0; opc <= 255; opc++) {
2740 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2741 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2742 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2743 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2744 		}
2745 	}
2746 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2747 
2748 	/* Fused WRITE */
2749 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2750 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2751 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2752 
2753 	/* No namespace for nsid 4, hence no bdev */
2754 	cmd.nvme_cmd.nsid = 4;
2755 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2756 	cmd.nvme_cmd.nsid = 1;
2757 
2758 	/* ZCOPY Not supported */
2759 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2760 	ns.zcopy = true;
2761 
2762 	/* ZCOPY disabled on transport level */
2763 	transport.opts.zcopy = false;
2764 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2765 	transport.opts.zcopy = true;
2766 
2767 	/* Success */
2768 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2769 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2770 
2771 	spdk_bit_array_free(&ctrlr.visible_ns);
2772 }
2773 
2774 static void
2775 qpair_state_change_done(void *cb_arg, int status)
2776 {
2777 }
2778 
2779 static void
2780 test_spdk_nvmf_request_zcopy_start(void)
2781 {
2782 	struct spdk_nvmf_request req = {};
2783 	struct spdk_nvmf_qpair qpair = {};
2784 	struct spdk_nvmf_transport transport = {};
2785 	struct spdk_nvme_cmd cmd = {};
2786 	union nvmf_c2h_msg rsp = {};
2787 	struct spdk_nvmf_ctrlr ctrlr = {};
2788 	struct spdk_nvmf_subsystem subsystem = {};
2789 	struct spdk_nvmf_ns ns = {};
2790 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2791 	enum spdk_nvme_ana_state ana_state[1];
2792 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2793 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2794 
2795 	struct spdk_nvmf_poll_group group = {};
2796 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2797 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2798 	struct spdk_io_channel io_ch = {};
2799 
2800 	ns.bdev = &bdev;
2801 	ns.zcopy = true;
2802 	ns.anagrpid = 1;
2803 
2804 	subsystem.id = 0;
2805 	subsystem.max_nsid = 1;
2806 	subsys_ns[0] = &ns;
2807 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2808 
2809 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2810 
2811 	/* Enable controller */
2812 	ctrlr.vcprop.cc.bits.en = 1;
2813 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2814 	ctrlr.listener = &listener;
2815 	ctrlr.visible_ns = spdk_bit_array_create(1);
2816 	spdk_bit_array_set(ctrlr.visible_ns, 0);
2817 
2818 	transport.opts.zcopy = true;
2819 
2820 	group.thread = spdk_get_thread();
2821 	group.num_sgroups = 1;
2822 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2823 	sgroups.num_ns = 1;
2824 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2825 	ns_info.channel = &io_ch;
2826 	sgroups.ns_info = &ns_info;
2827 	TAILQ_INIT(&sgroups.queued);
2828 	group.sgroups = &sgroups;
2829 	TAILQ_INIT(&qpair.outstanding);
2830 
2831 	qpair.ctrlr = &ctrlr;
2832 	qpair.group = &group;
2833 	qpair.transport = &transport;
2834 	qpair.qid = 1;
2835 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
2836 
2837 	cmd.nsid = 1;
2838 
2839 	req.qpair = &qpair;
2840 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2841 	req.rsp = &rsp;
2842 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2843 	cmd.opc = SPDK_NVME_OPC_READ;
2844 
2845 	/* Fail because no controller */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
	qpair.ctrlr = &ctrlr;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because bad NSID */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	cmd.nsid = 0;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	cmd.nsid = 1;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because bad Channel */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	ns_info.channel = NULL;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	ns_info.channel = &io_ch;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Queue the request because NSID is not active */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	TAILQ_REMOVE(&sgroups.queued, &req, link);
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because QPair is not active */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	qpair.state_cb = qpair_state_change_done;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
	qpair.state_cb = NULL;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
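	/* SLBA == blockcnt, so the 101-block range starts past the end of the bdev */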
	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	cmd.cdw10 = 0;
	cmd.cdw12 = 0;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Success */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);

	spdk_bit_array_free(&ctrlr.visible_ns);
}

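/*
 * Walk a zcopy READ through its whole life cycle: zcopy start should hand
 * back the stubbed zcopy_start_bdev_io_read and count the request as
 * outstanding on both the qpair and the namespace; zcopy end should release
 * both again.
 */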
static void
test_zcopy_read(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;
	ctrlr.visible_ns = spdk_bit_array_create(1);
	spdk_bit_array_set(ctrlr.visible_ns, 0);

	transport.opts.zcopy = true;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.transport = &transport;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	cmd.opc = SPDK_NVME_OPC_READ;

	/* Prepare for zcopy */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);

	/* Perform the zcopy start */
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
	CU_ASSERT(qpair.outstanding.tqh_first == &req);
	CU_ASSERT(ns_info.io_outstanding == 1);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Perform the zcopy end */
	spdk_nvmf_request_zcopy_end(&req, false);
	CU_ASSERT(req.zcopy_bdev_io == NULL);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	spdk_bit_array_free(&ctrlr.visible_ns);
}

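/*
 * Same life cycle as test_zcopy_read, but for a WRITE: zcopy start should
 * return the stubbed zcopy_start_bdev_io_write, and zcopy end is called with
 * commit == true.
 */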
static void
test_zcopy_write(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;
	ctrlr.visible_ns = spdk_bit_array_create(1);
	spdk_bit_array_set(ctrlr.visible_ns, 0);

	transport.opts.zcopy = true;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.transport = &transport;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	/* Prepare for zcopy */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);

	/* Perform the zcopy start */
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
	CU_ASSERT(qpair.outstanding.tqh_first == &req);
	CU_ASSERT(ns_info.io_outstanding == 1);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Perform the zcopy end */
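	/* commit == true: the written data is committed to the bdev before completion */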
	spdk_nvmf_request_zcopy_end(&req, true);
	CU_ASSERT(req.zcopy_bdev_io == NULL);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	spdk_bit_array_free(&ctrlr.visible_ns);
}

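/*
 * Exercise the fabrics Property Set/Get handlers: unsupported or read-only
 * offsets must fail with SPDK_NVMF_FABRIC_SC_INVALID_PARAM, while cc (4-byte)
 * and asq (8-byte, accessed 4 bytes at a time) round-trip through vcprop.
 */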
static void
test_nvmf_property_set(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	req.qpair = &qpair;
	qpair.ctrlr = &ctrlr;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Invalid parameters: vs is read-only, so set fails; intms is not a supported property, so get fails */
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);

	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);

	/* Set cc: the requested 4-byte size matches the property size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate cc data */
	ctrlr.vcprop.cc.raw = 0xDEADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);

	/* Set asq, an 8-byte property, using a 4-byte access size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate asq data */
	ctrlr.vcprop.asq = 0xAADDADBEEF;

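	/* A 4-byte get of the 8-byte asq property returns only the lower dword */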
	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
}

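/*
 * Get Features, Host Behavior Support: a missing or undersized reply buffer
 * must fail with SPDK_NVME_SC_INVALID_FIELD; on success the handler reports
 * the controller's current acre/lbafee state.
 */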
static void
test_nvmf_ctrlr_get_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Invalid data: no reply buffer provided */
	req.length = sizeof(struct spdk_nvme_host_behavior);
	req.iovcnt = 0;

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Wrong structure length */
	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length);

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Features Host Behavior Support Success */
	req.length = sizeof(struct spdk_nvme_host_behavior);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length);

	ctrlr.acre_enabled = true;
	ctrlr.lbafee_enabled = true;
	behavior.acre = false;
	behavior.lbafee = false;

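	/* The handler must overwrite the zeroed buffer with the controller's state */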
	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(behavior.acre == true);
	CU_ASSERT(behavior.lbafee == true);
}

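/*
 * Set Features, Host Behavior Support: invalid buffers and out-of-range acre
 * or lbafee values must fail with SPDK_NVME_SC_INVALID_FIELD; valid values
 * must be latched into ctrlr.acre_enabled and ctrlr.lbafee_enabled.
 */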
static void
test_nvmf_ctrlr_set_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior host_behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.iov[0].iov_base = &host_behavior;
	req.iov[0].iov_len = sizeof(host_behavior);

	/* Invalid iovcnt */
	req.iovcnt = 0;
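	/* Seed rc and the completion status with values the handler must overwrite */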
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Invalid iov_len */
	req.iovcnt = 1;
	req.iov[0].iov_len = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* acre is false but lbafee is true */
	host_behavior.acre = 0;
	host_behavior.lbafee = 1;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == false);
	CU_ASSERT(ctrlr.lbafee_enabled == true);

	/* acre is true but lbafee is false */
	host_behavior.acre = 1;
	host_behavior.lbafee = 0;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == true);
	CU_ASSERT(ctrlr.lbafee_enabled == false);

	/* Invalid acre */
	host_behavior.acre = 2;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Invalid lbafee */
	host_behavior.lbafee = 3;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

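/*
 * Namespace attachment: visibility starts out empty, a cold attach (host NQN
 * added to the ns host list) makes the ns visible only to that controller,
 * and always_visible auto-attaches the ns to any controller on init.
 */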
static void
test_nvmf_ctrlr_ns_attachment(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns1 = {
		.nsid = 1,
		.always_visible = false
	};
	struct spdk_nvmf_ns ns3 = {
		.nsid = 3,
		.always_visible = false
	};
	struct spdk_nvmf_ctrlr ctrlrA = {
		.subsys = &subsystem
	};
	struct spdk_nvmf_ctrlr ctrlrB = {
		.subsys = &subsystem
	};
	struct spdk_nvmf_host *host;
	uint32_t nsid;

	subsystem.max_nsid = 3;
	subsystem.ns = calloc(subsystem.max_nsid, sizeof(*subsystem.ns));
	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);

	/* nsid = 2 -> unallocated, nsid = 1,3 -> allocated */
	subsystem.ns[0] = &ns1;
	subsystem.ns[2] = &ns3;

	snprintf(ctrlrA.hostnqn, sizeof(ctrlrA.hostnqn), "nqn.2016-06.io.spdk:host1");
	ctrlrA.visible_ns = spdk_bit_array_create(subsystem.max_nsid);
	SPDK_CU_ASSERT_FATAL(ctrlrA.visible_ns != NULL);
	snprintf(ctrlrB.hostnqn, sizeof(ctrlrB.hostnqn), "nqn.2016-06.io.spdk:host2");
	ctrlrB.visible_ns = spdk_bit_array_create(subsystem.max_nsid);
	SPDK_CU_ASSERT_FATAL(ctrlrB.visible_ns != NULL);

	/* Neither auto attach nor cold attach: no namespace is visible to either ctrlr */
	nsid = 1;
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	nvmf_ctrlr_init_visible_ns(&ctrlrA);
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);

	/* Cold attach ctrlrA to namespace 1 */
	nsid = 1;
	host = calloc(1, sizeof(*host));
	SPDK_CU_ASSERT_FATAL(host != NULL);
	snprintf(host->nqn, sizeof(host->nqn), "%s", ctrlrA.hostnqn);
	TAILQ_INSERT_HEAD(&ns1.hosts, host, link);
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	nvmf_ctrlr_init_visible_ns(&ctrlrA);
	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);

	/* Detach ctrlrA from namespace 1 */
	nsid = 1;
	spdk_bit_array_clear(ctrlrA.visible_ns, nsid - 1);
	TAILQ_REMOVE(&ns1.hosts, host, link);
	free(host);

	/* Auto attach any ctrlr to namespace 1 */
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	ns1.always_visible = true;
	nvmf_ctrlr_init_visible_ns(&ctrlrA);
	nsid = 1;
	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	nvmf_ctrlr_init_visible_ns(&ctrlrB);
	nsid = 1;
	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);

	spdk_bit_array_free(&ctrlrA.visible_ns);
	spdk_bit_array_free(&ctrlrB.visible_ns);
	free(subsystem.ns);
}

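/*
 * nvmf_check_qpair_active: anything is allowed while enabled, only CONNECT
 * while connecting, only AUTHENTICATION_SEND/RECV while authenticating, and
 * nothing in the remaining states.
 */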
static void
test_nvmf_check_qpair_active(void)
{
	union nvmf_c2h_msg rsp = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_nvmf_qpair qpair = { .outstanding = TAILQ_HEAD_INITIALIZER(qpair.outstanding) };
	struct spdk_nvmf_request req = { .qpair = &qpair, .cmd = &cmd, .rsp = &rsp };
	size_t i;

	/* qpair is active */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);

	/* qpair is connecting - CONNECT is allowed */
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);

	/* qpair is connecting - other commands are disallowed */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);

	/* qpair is authenticating - AUTHENTICATION_SEND is allowed */
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND;
	qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);

	/* qpair is authenticating - AUTHENTICATION_RECV is allowed */
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV;
	qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);

	/* qpair is authenticating - other commands are disallowed */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVMF_FABRIC_SC_AUTH_REQUIRED);

	/* qpair is in one of the other states - all commands are disallowed */
	enum spdk_nvmf_qpair_state disallowed_states[] = {
		SPDK_NVMF_QPAIR_UNINITIALIZED,
		SPDK_NVMF_QPAIR_DEACTIVATING,
		SPDK_NVMF_QPAIR_ERROR,
	};
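	/* A state_cb is provided because failing a request on a DEACTIVATING qpair
	 * can invoke the qpair's state-change callback. */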
	qpair.state_cb = qpair_state_change_done;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	for (i = 0; i < SPDK_COUNTOF(disallowed_states); ++i) {
		qpair.state = disallowed_states[i];
		CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false);
		CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
		CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	}
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);
	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);
	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
	CU_ADD_TEST(suite, test_multi_async_events);
	CU_ADD_TEST(suite, test_rae);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
	CU_ADD_TEST(suite, test_zcopy_read);
	CU_ADD_TEST(suite, test_zcopy_write);
	CU_ADD_TEST(suite, test_nvmf_property_set);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_ns_attachment);
	CU_ADD_TEST(suite, test_nvmf_check_qpair_active);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}