/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/bdev_zone.h"
#include "spdk/nvme_spec.h"
#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

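/*
 * The implementation under test and the multithread test harness are included
 * as .c files so that their static functions are visible to the test cases
 * and so that their external dependencies can be replaced with the stubs below.
 */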
#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

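/*
 * Minimal stand-in for the real struct spdk_bdev: only the fields that the
 * ctrlr code and the bdev stubs below actually read are defined here.
 */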
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
	bool zoned;
	uint32_t zone_size;
	uint32_t max_open_zones;
	uint32_t max_active_zones;
	enum spdk_dif_type dif_type;
};

#define MAX_OPEN_ZONES 12
#define MAX_ACTIVE_ZONES 34
#define ZONE_SIZE 56

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

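/*
 * Sentinel bdev_io pointers used by the zcopy tests.  They are opaque markers
 * that are only compared, never dereferenced, so the tests can tell which
 * zcopy path (read, write, or failure) was taken.
 */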
static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

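/*
 * Stub out the dependencies of nvmf/ctrlr.c.  DEFINE_STUB() generates a mock
 * with a default return value that individual tests can override with
 * MOCK_SET(); DEFINE_STUB_V() does the same for functions returning void.
 */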
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);
DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool,
	    (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev_desc *desc, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
	    MAX_ACTIVE_ZONES);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n),
	    false);
DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0);
DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r),
	    SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

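/*
 * The dependencies below get real (minimal) implementations instead of stubs
 * because the tests rely on their side effects.
 */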
void
nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, enum spdk_nvmf_qpair_state state)
{
	qpair->state = state;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair)
{
	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

void
nvmf_bdev_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ns *ns,
				  struct spdk_nvme_nvm_ns_data *nsdata_nvm)
{
	if (ns->bdev->dif_type == SPDK_DIF_DISABLE) {
		return;
	}

	nsdata_nvm->lbstm = 0;
	nsdata_nvm->pic._16bpists = 0;
	nsdata_nvm->pic._16bpistm = 1;
	nsdata_nvm->pic.stcrs = 0;
	nsdata_nvm->elbaf[0].sts = 16;
	nsdata_nvm->elbaf[0].pif = SPDK_DIF_PI_FORMAT_32;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

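/*
 * Minimal zcopy start: validate the LBA range against the bdev size and record
 * which opcode was seen using the sentinel bdev_io pointers defined above.
 */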
int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}


	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

bool
nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
{
	return ns->ptpl_file != NULL;
}

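/*
 * Get Log Page handling: a well-formed request succeeds, while an invalid log
 * ID, a non-dword-aligned offset, or a missing data buffer completes with
 * Invalid Field in Command.
 */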
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.length = sizeof(data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &data, req.length);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.iovcnt = 0;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

static void
test_process_fabrics_cmd(void)
{
	struct	spdk_nvmf_request req = {};
	bool	ret;
	struct	spdk_nvmf_qpair req_qpair = {};
	union	nvmf_h2c_msg  req_cmd = {};
	union	nvmf_c2h_msg   req_rsp = {};

	TAILQ_INIT(&req_qpair.outstanding);
	req_qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	req.qpair = &req_qpair;
	req.cmd  = &req_cmd;
	req.rsp  = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_check_qpair_active(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT(ret == false);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

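/*
 * Exercises nvmf_ctrlr_cmd_connect() for admin and I/O queues: successful
 * connects (with and without keep-alive), plus the CONNECT validation
 * failures (bad data length, recfmt, unknown subsystem, host not allowed,
 * invalid sqsize/cntlid/qid, disabled controller, duplicate queue IDs, and
 * connects racing with admin qpair teardown).
 */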
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_ns *ns_arr[1] = { NULL };
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_CONNECTING;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
	subsystem.ns = ns_arr;
	subsystem.max_nsid = 1;

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.length = sizeof(connect_data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* Admin connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;

	/* Admin connect to discovery controller with keep-alive-timeout == 0.
	 * In this case a default keep-alive timeout value is applied instead.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, false);

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with qid that is too large */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect duplicate QID and schedule a retry.  So for
	 * now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now advance the clock, so that the retry poller executes. */
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with temporarily duplicate queue ID. This covers race
	 * where qpair_mask bit may not yet be cleared, even though initiator
	 * has closed the connection.  See issue #2955. */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect duplicate QID and schedule a retry.  So for
	 * now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now advance the clock, so that the retry poller executes. */
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_CONNECTING;

	/* I/O connect when admin qpair was destroyed */
	ctrlr.admin_qpair = NULL;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.admin_qpair = &admin_qpair;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

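/*
 * Identify Namespace Identification Descriptor list (CNS 03h).  Each descriptor
 * starts with a 4-byte header (NIDT, NIDL, two reserved bytes) followed by the
 * identifier itself, which is what the buf[] offset checks below verify.
 */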
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();
	ctrlr.visible_ns = spdk_bit_array_create(1);

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.length = sizeof(buf);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns is inactive */
	spdk_bit_array_clear(ctrlr.visible_ns, 0);
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, but command not using NSID */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_KEEP_ALIVE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);

	spdk_bit_array_free(&ctrlr.visible_ns);
}

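/*
 * Identify Namespace: visible namespaces return their data, inactive or
 * unallocated NSIDs complete successfully with zero-filled data, and
 * out-of-range NSIDs fail with Invalid Namespace or Format.
 */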
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem,
		.admin_qpair = &admin_qpair,
	};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	ctrlr.visible_ns = spdk_bit_array_create(3);
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	spdk_bit_array_set(ctrlr.visible_ns, 2);

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 1 */
	spdk_bit_array_clear(ctrlr.visible_ns, 0);
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid but unallocated NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	spdk_bit_array_free(&ctrlr.visible_ns);
}

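/*
 * I/O-command-set-specific Identify Namespace for the ZNS and NVM command
 * sets.  Note that MAR and MOR are 0's based values, hence the
 * MAX_ACTIVE_ZONES - 1 and MAX_OPEN_ZONES - 1 checks below.
 */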
static void
test_identify_ns_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ns_data nsdata_zns = {};
	struct spdk_nvme_nvm_ns_data nsdata_nvm = {};
	struct spdk_bdev bdev[2] = {
		{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE,
		 .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES},
		{.blockcnt = 5678}
	};
	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};

	ctrlr.visible_ns = spdk_bit_array_create(3);
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	spdk_bit_array_set(ctrlr.visible_ns, 1);
	spdk_bit_array_set(ctrlr.visible_ns, 2);
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* Invalid ZNS NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata_zns, 0xFF, sizeof(nsdata_zns));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata_zns, sizeof(nsdata_zns)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata_zns, sizeof(nsdata_zns)));

	/* Valid ZNS NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata_zns, 0xFF, sizeof(nsdata_zns));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata_zns, sizeof(nsdata_zns)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata_zns.ozcs.read_across_zone_boundaries == 1);
	CU_ASSERT(nsdata_zns.mar == MAX_ACTIVE_ZONES - 1);
	CU_ASSERT(nsdata_zns.mor == MAX_OPEN_ZONES - 1);
	CU_ASSERT(nsdata_zns.lbafe[0].zsze == ZONE_SIZE);
	nsdata_zns.ozcs.read_across_zone_boundaries = 0;
	nsdata_zns.mar = 0;
	nsdata_zns.mor = 0;
	nsdata_zns.lbafe[0].zsze = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata_zns, sizeof(nsdata_zns)));

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* Valid NVM NSID 2 with DIF type 1 */
	bdev[1].dif_type = SPDK_DIF_TYPE1;
	cmd.nsid = 2;
	memset(&nsdata_nvm, 0xFF, sizeof(nsdata_nvm));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata_nvm, sizeof(nsdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata_nvm.lbstm == 0);
	CU_ASSERT(nsdata_nvm.pic._16bpists == 0);
	CU_ASSERT(nsdata_nvm.pic._16bpistm == 1);
	CU_ASSERT(nsdata_nvm.pic.stcrs == 0);
	CU_ASSERT(nsdata_nvm.elbaf[0].sts == 16);
	CU_ASSERT(nsdata_nvm.elbaf[0].pif == SPDK_DIF_PI_FORMAT_32);
	nsdata_nvm.pic._16bpistm = 0;
	nsdata_nvm.elbaf[0].sts = 0;
	nsdata_nvm.elbaf[0].pif = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata_nvm, sizeof(nsdata_nvm)));

	/* Invalid NVM NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata_nvm, 0xFF, sizeof(nsdata_nvm));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata_nvm, sizeof(nsdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata_nvm, sizeof(nsdata_nvm)));

	spdk_bit_array_free(&ctrlr.visible_ns);
}

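/*
 * Set/Get Features: reservation persistence, temperature threshold (including
 * reserved TMPSEL/THSEL values), and error recovery, where setting DULBE is
 * rejected with Invalid Field while clearing it succeeds.
 */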
1291 static void
1292 test_set_get_features(void)
1293 {
1294 	struct spdk_nvmf_subsystem subsystem = {};
1295 	struct spdk_nvmf_qpair admin_qpair = {};
1296 	enum spdk_nvme_ana_state ana_state[3];
1297 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1298 	struct spdk_nvmf_ctrlr ctrlr = {
1299 		.subsys = &subsystem,
1300 		.admin_qpair = &admin_qpair,
1301 		.listener = &listener
1302 	};
1303 	union nvmf_h2c_msg cmd = {};
1304 	union nvmf_c2h_msg rsp = {};
1305 	struct spdk_nvmf_ns ns[3];
1306 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
1307 	struct spdk_nvmf_request req;
1308 	int rc;
1309 
1310 	ctrlr.visible_ns = spdk_bit_array_create(3);
1311 	spdk_bit_array_set(ctrlr.visible_ns, 0);
1312 	spdk_bit_array_set(ctrlr.visible_ns, 2);
1313 	ns[0].anagrpid = 1;
1314 	ns[2].anagrpid = 3;
1315 	subsystem.ns = ns_arr;
1316 	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
1317 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1318 	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1319 	admin_qpair.ctrlr = &ctrlr;
1320 	req.qpair = &admin_qpair;
1321 	cmd.nvme_cmd.nsid = 1;
1322 	req.cmd = &cmd;
1323 	req.rsp = &rsp;
1324 
1325 	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1326 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1327 	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
1328 	ns[0].ptpl_file = "testcfg";
1329 	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
1330 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1331 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1332 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
1333 	CU_ASSERT(ns[0].ptpl_activated == true);
1334 
1335 	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
1336 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1337 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
1338 	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
1339 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1340 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1341 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1342 	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
1343 
1344 
1345 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1346 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1347 	cmd.nvme_cmd.cdw11 = 0x42;
1348 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1349 
1350 	rc = nvmf_ctrlr_get_features(&req);
1351 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1352 
1353 	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1354 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1355 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1356 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1357 
1358 	rc = nvmf_ctrlr_get_features(&req);
1359 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1360 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1361 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1362 
1363 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
1364 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1365 	cmd.nvme_cmd.cdw11 = 0x42;
1366 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1367 
1368 	rc = nvmf_ctrlr_set_features(&req);
1369 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1370 
1371 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
1372 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1373 	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
1374 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1375 
1376 	rc = nvmf_ctrlr_set_features(&req);
1377 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1378 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1379 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1380 
1381 	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
1382 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1383 	cmd.nvme_cmd.cdw11 = 0x42;
1384 	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
1385 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
1386 
1387 	rc = nvmf_ctrlr_set_features(&req);
1388 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1389 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1390 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1391 
1393 	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
1394 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
1395 	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1396 
1397 	rc = nvmf_ctrlr_get_features(&req);
1398 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1399 
1400 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
1401 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1402 	cmd.nvme_cmd.cdw11 = 0x42;
1403 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
1404 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1405 
1406 	rc = nvmf_ctrlr_set_features(&req);
1407 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1408 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1409 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1410 
1411 	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
1412 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1413 	cmd.nvme_cmd.cdw11 = 0x42;
1414 	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
1415 	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
1416 
1417 	rc = nvmf_ctrlr_set_features(&req);
1418 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1419 
1420 	spdk_bit_array_free(&ctrlr.visible_ns);
1421 }
1422 
1423 /*
1424  * Reservation Unit Test Configuration
1425  *       --------             --------    --------
1426  *      | Host A |           | Host B |  | Host C |
1427  *       --------             --------    --------
1428  *      /        \               |           |
1429  *  --------   --------       -------     -------
1430  * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
1431  *  --------   --------       -------     -------
1432  *    \           \              /           /
1433  *     \           \            /           /
1434  *      \           \          /           /
1435  *      --------------------------------------
1436  *     |            NAMESPACE 1               |
1437  *      --------------------------------------
1438  */
1439 
1440 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
1441 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
1442 
1443 static void
1444 ut_reservation_init(enum spdk_nvme_reservation_type rtype)
1445 {
1446 	/* Host A has two controllers */
1447 	spdk_uuid_generate(&g_ctrlr1_A.hostid);
1448 	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
1449 
1450 	/* Host B has 1 controller */
1451 	spdk_uuid_generate(&g_ctrlr_B.hostid);
1452 
1453 	/* Host C has 1 controller */
1454 	spdk_uuid_generate(&g_ctrlr_C.hostid);
1455 
1456 	memset(&g_ns_info, 0, sizeof(g_ns_info));
1457 	g_ns_info.rtype = rtype;
1458 	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
1459 	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
1460 	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
1461 }
1462 
1463 static void
1464 test_reservation_write_exclusive(void)
1465 {
1466 	struct spdk_nvmf_request req = {};
1467 	union nvmf_h2c_msg cmd = {};
1468 	union nvmf_c2h_msg rsp = {};
1469 	int rc;
1470 
1471 	req.cmd = &cmd;
1472 	req.rsp = &rsp;
1473 
1474 	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
1475 	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
1476 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1477 
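	/* Write Exclusive: any host may read the namespace, but only the
	 * reservation holder (Host A) may issue writes.
	 */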
1478 	/* Test Case: Issue a Read command from Host A and Host B */
1479 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1480 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1481 	SPDK_CU_ASSERT_FATAL(rc == 0);
1482 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1483 	SPDK_CU_ASSERT_FATAL(rc == 0);
1484 
1485 	/* Test Case: Issue a DSM Write command from Host A and Host B */
1486 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1487 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1488 	SPDK_CU_ASSERT_FATAL(rc == 0);
1489 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1490 	SPDK_CU_ASSERT_FATAL(rc < 0);
1491 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1492 
1493 	/* Test Case: Issue a Write command from Host C */
1494 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1495 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1496 	SPDK_CU_ASSERT_FATAL(rc < 0);
1497 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1498 
1499 	/* Test Case: Issue a Read command from Host B */
1500 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1501 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1502 	SPDK_CU_ASSERT_FATAL(rc == 0);
1503 
1504 	/* Unregister Host C */
1505 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1506 
1507 	/* Test Case: Read and Write commands from non-registrant Host C */
1508 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1509 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1510 	SPDK_CU_ASSERT_FATAL(rc < 0);
1511 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1512 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1513 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1514 	SPDK_CU_ASSERT_FATAL(rc == 0);
1515 }
1516 
1517 static void
1518 test_reservation_exclusive_access(void)
1519 {
1520 	struct spdk_nvmf_request req = {};
1521 	union nvmf_h2c_msg cmd = {};
1522 	union nvmf_c2h_msg rsp = {};
1523 	int rc;
1524 
1525 	req.cmd = &cmd;
1526 	req.rsp = &rsp;
1527 
1528 	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
1529 	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
1530 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1531 
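	/* Exclusive Access: only the reservation holder (Host A) may read or
	 * write the namespace; other hosts get a reservation conflict.
	 */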
1532 	/* Test Case: Issue a Read command from Host B */
1533 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1534 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1535 	SPDK_CU_ASSERT_FATAL(rc < 0);
1536 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1537 
1538 	/* Test Case: Issue a Reservation Release command from a valid Registrant */
1539 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1540 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1541 	SPDK_CU_ASSERT_FATAL(rc == 0);
1542 }
1543 
1544 static void
1545 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1546 {
1547 	struct spdk_nvmf_request req = {};
1548 	union nvmf_h2c_msg cmd = {};
1549 	union nvmf_c2h_msg rsp = {};
1550 	int rc;
1551 
1552 	req.cmd = &cmd;
1553 	req.rsp = &rsp;
1554 
1555 	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
1556 	ut_reservation_init(rtype);
1557 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1558 
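	/* Write Exclusive - Registrants Only / All Registrants: every registrant
	 * may write and all hosts may read; once Host C unregisters, its writes
	 * are rejected but its reads still succeed.
	 */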
1559 	/* Test Case: Issue a Read command from Host A and Host C */
1560 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1561 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1562 	SPDK_CU_ASSERT_FATAL(rc == 0);
1563 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1564 	SPDK_CU_ASSERT_FATAL(rc == 0);
1565 
1566 	/* Test Case: Issue a DSM Write command from Host A and Host C */
1567 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1568 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
1569 	SPDK_CU_ASSERT_FATAL(rc == 0);
1570 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1571 	SPDK_CU_ASSERT_FATAL(rc == 0);
1572 
1573 	/* Unregister Host C */
1574 	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);
1575 
1576 	/* Test Case: Read and Write commands from non-registrant Host C */
1577 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1578 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1579 	SPDK_CU_ASSERT_FATAL(rc == 0);
1580 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1581 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
1582 	SPDK_CU_ASSERT_FATAL(rc < 0);
1583 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1584 }
1585 
1586 static void
1587 test_reservation_write_exclusive_regs_only_and_all_regs(void)
1588 {
1589 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1590 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
1591 	_test_reservation_write_exclusive_regs_only_and_all_regs(
1592 		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
1593 }
1594 
1595 static void
1596 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
1597 {
1598 	struct spdk_nvmf_request req = {};
1599 	union nvmf_h2c_msg cmd = {};
1600 	union nvmf_c2h_msg rsp = {};
1601 	int rc;
1602 
1603 	req.cmd = &cmd;
1604 	req.rsp = &rsp;
1605 
1606 	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
1607 	ut_reservation_init(rtype);
1608 	g_ns_info.holder_id = g_ctrlr1_A.hostid;
1609 
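	/* Exclusive Access - Registrants Only / All Registrants: registrants may
	 * read and write; once Host B unregisters, both its reads and writes are
	 * rejected with a reservation conflict.
	 */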
1610 	/* Test Case: Issue a Write command from Host B */
1611 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1612 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1613 	SPDK_CU_ASSERT_FATAL(rc == 0);
1614 
1615 	/* Unregister Host B */
1616 	spdk_uuid_set_null(&g_ns_info.reg_hostid[1]);
1617 
1618 	/* Test Case: Issue a Read command from Host B */
1619 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
1620 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1621 	SPDK_CU_ASSERT_FATAL(rc < 0);
1622 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1623 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
1624 	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
1625 	SPDK_CU_ASSERT_FATAL(rc < 0);
1626 	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
1627 }
1628 
1629 static void
1630 test_reservation_exclusive_access_regs_only_and_all_regs(void)
1631 {
1632 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1633 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
1634 	_test_reservation_exclusive_access_regs_only_and_all_regs(
1635 		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
1636 }
1637 
1638 static void
1639 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1640 {
1641 	STAILQ_INIT(&ctrlr->async_events);
1642 }
1643 
1644 static void
1645 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1646 {
1647 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
1648 
1649 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
1650 		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
1651 		free(event);
1652 	}
1653 }
1654 
1655 static int
1656 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
1657 {
1658 	int num = 0;
1659 	struct spdk_nvmf_async_event_completion *event;
1660 
1661 	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
1662 		num++;
1663 	}
1664 	return num;
1665 }
1666 
1667 static void
1668 test_reservation_notification_log_page(void)
1669 {
1670 	struct spdk_nvmf_ctrlr ctrlr;
1671 	struct spdk_nvmf_qpair qpair;
1672 	struct spdk_nvmf_ns ns;
1673 	struct spdk_nvmf_request req = {};
1674 	union nvmf_h2c_msg cmd = {};
1675 	union nvmf_c2h_msg rsp = {};
1676 	union spdk_nvme_async_event_completion event = {};
1677 	struct spdk_nvme_reservation_notification_log logs[3];
1678 	struct iovec iov;
1679 
1680 	memset(&ctrlr, 0, sizeof(ctrlr));
1681 	ctrlr.thread = spdk_get_thread();
1682 	TAILQ_INIT(&ctrlr.log_head);
1683 	init_pending_async_events(&ctrlr);
1684 	ns.nsid = 1;
1685 
1686 	/* Test Case: Mask all the reservation notifications */
1687 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1688 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1689 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1690 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1691 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1692 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1693 					  SPDK_NVME_RESERVATION_RELEASED);
1694 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1695 					  SPDK_NVME_RESERVATION_PREEMPTED);
1696 	poll_threads();
1697 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1698 
1699 	/* Test Case: Unmask all the reservation notifications,
1700 	 * 3 log pages are generated and an AER is triggered.
1701 	 */
1702 	ns.mask = 0;
1703 	ctrlr.num_avail_log_pages = 0;
1704 	req.cmd = &cmd;
1705 	req.rsp = &rsp;
1706 	ctrlr.aer_req[0] = &req;
1707 	ctrlr.nr_aer_reqs = 1;
1708 	req.qpair = &qpair;
1709 	TAILQ_INIT(&qpair.outstanding);
1710 	qpair.ctrlr = NULL;
1711 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
1712 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1713 
1714 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1715 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1716 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1717 					  SPDK_NVME_RESERVATION_RELEASED);
1718 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1719 					  SPDK_NVME_RESERVATION_PREEMPTED);
1720 	poll_threads();
1721 	event.raw = rsp.nvme_cpl.cdw0;
1722 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1723 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1724 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1725 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1726 
1727 	/* Test Case: Get Log Page to clear the log pages */
1728 	iov.iov_base = &logs[0];
1729 	iov.iov_len = sizeof(logs);
1730 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1731 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1732 
1733 	cleanup_pending_async_events(&ctrlr);
1734 }
1735 
1736 static void
1737 test_get_dif_ctx(void)
1738 {
1739 	struct spdk_nvmf_subsystem subsystem = {};
1740 	struct spdk_nvmf_request req = {};
1741 	struct spdk_nvmf_qpair qpair = {};
1742 	struct spdk_nvmf_ctrlr ctrlr = {};
1743 	struct spdk_nvmf_ns ns = {};
1744 	struct spdk_nvmf_ns *_ns = NULL;
1745 	struct spdk_bdev bdev = {};
1746 	union nvmf_h2c_msg cmd = {};
1747 	struct spdk_dif_ctx dif_ctx = {};
1748 	bool ret;
1749 
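	/* spdk_nvmf_request_get_dif_ctx() should return false until every
	 * precondition holds: DIF insert/strip enabled, an enabled I/O qpair,
	 * a non-fabrics data-transfer opcode, and a valid NSID backed by a
	 * namespace with a bdev.
	 */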
1750 	ctrlr.subsys = &subsystem;
1751 	ctrlr.visible_ns = spdk_bit_array_create(1);
1752 	spdk_bit_array_set(ctrlr.visible_ns, 0);
1753 
1754 	qpair.ctrlr = &ctrlr;
1755 
1756 	req.qpair = &qpair;
1757 	req.cmd = &cmd;
1758 
1759 	ns.bdev = &bdev;
1760 
1761 	ctrlr.dif_insert_or_strip = false;
1762 
1763 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1764 	CU_ASSERT(ret == false);
1765 
1766 	ctrlr.dif_insert_or_strip = true;
1767 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1768 
1769 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1770 	CU_ASSERT(ret == false);
1771 
1772 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
1773 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1774 
1775 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1776 	CU_ASSERT(ret == false);
1777 
1778 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1779 
1780 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1781 	CU_ASSERT(ret == false);
1782 
1783 	qpair.qid = 1;
1784 
1785 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1786 	CU_ASSERT(ret == false);
1787 
1788 	cmd.nvme_cmd.nsid = 1;
1789 
1790 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1791 	CU_ASSERT(ret == false);
1792 
1793 	subsystem.max_nsid = 1;
1794 	subsystem.ns = &_ns;
1795 	subsystem.ns[0] = &ns;
1796 
1797 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1798 	CU_ASSERT(ret == false);
1799 
1800 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1801 
1802 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1803 	CU_ASSERT(ret == true);
1804 
1805 	spdk_bit_array_free(&ctrlr.visible_ns);
1806 }
1807 
1808 static void
1809 test_identify_ctrlr(void)
1810 {
1811 	struct spdk_nvmf_tgt tgt = {};
1812 	struct spdk_nvmf_subsystem subsystem = {
1813 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1814 		.tgt = &tgt,
1815 	};
1816 	struct spdk_nvmf_transport_ops tops = {};
1817 	struct spdk_nvmf_transport transport = {
1818 		.ops = &tops,
1819 		.opts = {
1820 			.in_capsule_data_size = 4096,
1821 		},
1822 	};
1823 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1824 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1825 	struct spdk_nvme_ctrlr_data cdata = {};
1826 	uint32_t expected_ioccsz;
1827 
1828 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1829 
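	/* IOCCSZ is reported in 16-byte units and covers the 64-byte SQE plus
	 * the transport's in-capsule data size.
	 */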
1830 	/* Check ioccsz, TCP transport */
1831 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1832 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1833 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1834 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1835 
1836 	/* Check ioccsz, RDMA transport */
1837 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1838 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1839 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1840 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1841 
1842 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1843 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1844 	ctrlr.dif_insert_or_strip = true;
1845 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1846 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1847 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1848 }
1849 
1850 static void
1851 test_identify_ctrlr_iocs_specific(void)
1852 {
1853 	struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 };
1854 	struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 };
1855 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop };
1856 	struct spdk_nvme_cmd cmd = {};
1857 	struct spdk_nvme_cpl rsp = {};
1858 	struct spdk_nvme_zns_ctrlr_data ctrlr_data = {};
1859 	struct spdk_nvme_nvm_ctrlr_data cdata_nvm = {};
1860 
1861 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;
1862 
1863 	/* ZNS max_zone_append_size_kib no limit */
1864 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1865 	memset(&rsp, 0, sizeof(rsp));
1866 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1867 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1868 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1869 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1870 	CU_ASSERT(ctrlr_data.zasl == 0);
1871 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1872 
1873 	/* ZNS max_zone_append_size_kib = 4096 */
1874 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1875 	memset(&rsp, 0, sizeof(rsp));
1876 	subsystem.max_zone_append_size_kib = 4096;
1877 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1878 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1879 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1880 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1881 	CU_ASSERT(ctrlr_data.zasl == 0);
1882 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1883 
1884 	/* ZNS max_zone_append_size_kib = 60000 */
1885 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1886 	memset(&rsp, 0, sizeof(rsp));
1887 	subsystem.max_zone_append_size_kib = 60000;
1888 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1889 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1890 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1891 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1892 	CU_ASSERT(ctrlr_data.zasl == 3);
1893 	ctrlr_data.zasl = 0;
1894 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1895 
1896 	/* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */
1897 	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
1898 	memset(&rsp, 0, sizeof(rsp));
1899 	ctrlr.vcprop.cap.bits.mpsmin = 2;
1900 	subsystem.max_zone_append_size_kib = 60000;
1901 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1902 			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1903 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1904 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1905 	CU_ASSERT(ctrlr_data.zasl == 1);
1906 	ctrlr_data.zasl = 0;
1907 	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
1908 	ctrlr.vcprop.cap.bits.mpsmin = 0;
1909 
1910 	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;
1911 
1912 	/* NVM max_discard_size_kib = 1024;
1913 	 * max_write_zeroes_size_kib = 1024;
1914 	 * mpsmin = 0;
1915 	 */
1916 	memset(&cdata_nvm, 0xFF, sizeof(cdata_nvm));
1917 	memset(&rsp, 0, sizeof(rsp));
1918 	subsystem.max_discard_size_kib = (uint64_t)1024;
1919 	subsystem.max_write_zeroes_size_kib = (uint64_t)1024;
1920 	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
1921 			&cdata_nvm, sizeof(cdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1922 	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
1923 	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
1924 	CU_ASSERT(cdata_nvm.wzsl == 8);
1925 	CU_ASSERT(cdata_nvm.dmrsl == 2048);
1926 	CU_ASSERT(cdata_nvm.dmrl == 1);
1927 }
1928 
1929 static int
1930 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1931 {
1932 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1933 
1934 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1935 }
1936 
1937 static void
1938 test_custom_admin_cmd(void)
1939 {
1940 	struct spdk_nvmf_subsystem subsystem;
1941 	struct spdk_nvmf_qpair qpair;
1942 	struct spdk_nvmf_ctrlr ctrlr;
1943 	struct spdk_nvmf_request req;
1944 	struct spdk_nvmf_ns *ns_ptrs[1];
1945 	struct spdk_nvmf_ns ns;
1946 	union nvmf_h2c_msg cmd;
1947 	union nvmf_c2h_msg rsp;
1948 	struct spdk_bdev bdev;
1949 	uint8_t buf[4096];
1950 	int rc;
1951 
1952 	memset(&subsystem, 0, sizeof(subsystem));
1953 	ns_ptrs[0] = &ns;
1954 	subsystem.ns = ns_ptrs;
1955 	subsystem.max_nsid = 1;
1956 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1957 
1958 	memset(&ns, 0, sizeof(ns));
1959 	ns.opts.nsid = 1;
1960 	ns.bdev = &bdev;
1961 
1962 	memset(&qpair, 0, sizeof(qpair));
1963 	qpair.ctrlr = &ctrlr;
1964 
1965 	memset(&ctrlr, 0, sizeof(ctrlr));
1966 	ctrlr.subsys = &subsystem;
1967 	ctrlr.vcprop.cc.bits.en = 1;
1968 	ctrlr.thread = spdk_get_thread();
1969 
1970 	memset(&req, 0, sizeof(req));
1971 	req.qpair = &qpair;
1972 	req.cmd = &cmd;
1973 	req.rsp = &rsp;
1974 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1975 	req.length = sizeof(buf);
1976 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);
1977 
1978 	memset(&cmd, 0, sizeof(cmd));
1979 	cmd.nvme_cmd.opc = 0xc1;
1980 	cmd.nvme_cmd.nsid = 0;
1981 	memset(&rsp, 0, sizeof(rsp));
1982 
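	/* Register a handler for the vendor-specific admin opcode 0xc1 so that
	 * nvmf_ctrlr_process_admin_cmd() dispatches to it instead of failing.
	 */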
1983 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1984 
1985 	/* Ensure that our custom handler is called */
1986 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1987 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1988 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1989 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1990 }
1991 
1992 static void
1993 test_fused_compare_and_write(void)
1994 {
1995 	struct spdk_nvmf_request req = {};
1996 	struct spdk_nvmf_qpair qpair = {};
1997 	struct spdk_nvme_cmd cmd = {};
1998 	union nvmf_c2h_msg rsp = {};
1999 	struct spdk_nvmf_ctrlr ctrlr = {};
2000 	struct spdk_nvmf_subsystem subsystem = {};
2001 	struct spdk_nvmf_ns ns = {};
2002 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2003 	enum spdk_nvme_ana_state ana_state[1];
2004 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2005 	struct spdk_bdev bdev = {};
2006 
2007 	struct spdk_nvmf_poll_group group = {};
2008 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2009 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2010 	struct spdk_io_channel io_ch = {};
2011 
2012 	ns.bdev = &bdev;
2013 	ns.anagrpid = 1;
2014 
2015 	subsystem.id = 0;
2016 	subsystem.max_nsid = 1;
2017 	subsys_ns[0] = &ns;
2018 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2019 
2020 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2021 
2022 	/* Enable controller */
2023 	ctrlr.vcprop.cc.bits.en = 1;
2024 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2025 	ctrlr.listener = &listener;
2026 	ctrlr.visible_ns = spdk_bit_array_create(1);
2027 	spdk_bit_array_set(ctrlr.visible_ns, 0);
2028 
2029 	group.num_sgroups = 1;
2030 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2031 	sgroups.num_ns = 1;
2032 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2033 	ns_info.channel = &io_ch;
2034 	sgroups.ns_info = &ns_info;
2035 	TAILQ_INIT(&sgroups.queued);
2036 	group.sgroups = &sgroups;
2037 	TAILQ_INIT(&qpair.outstanding);
2038 
2039 	qpair.ctrlr = &ctrlr;
2040 	qpair.group = &group;
2041 	qpair.qid = 1;
2042 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
2043 
2044 	cmd.nsid = 1;
2045 
2046 	req.qpair = &qpair;
2047 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2048 	req.rsp = &rsp;
2049 
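	/* A fused Compare and Write must arrive as a Compare with FUSE_FIRST
	 * immediately followed by a Write with FUSE_SECOND; the cases below
	 * cover the valid pair and each invalid combination.
	 */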
2050 	/* SUCCESS/SUCCESS */
2051 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
2052 	cmd.opc = SPDK_NVME_OPC_COMPARE;
2053 
2054 	spdk_nvmf_request_exec(&req);
2055 	CU_ASSERT(qpair.first_fused_req != NULL);
2056 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2057 
2058 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2059 	cmd.opc = SPDK_NVME_OPC_WRITE;
2060 
2061 	spdk_nvmf_request_exec(&req);
2062 	CU_ASSERT(qpair.first_fused_req == NULL);
2063 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2064 
2065 	/* Wrong sequence */
2066 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2067 	cmd.opc = SPDK_NVME_OPC_WRITE;
2068 
2069 	spdk_nvmf_request_exec(&req);
2070 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
2071 	CU_ASSERT(qpair.first_fused_req == NULL);
2072 
2073 	/* Write as FUSE_FIRST (Wrong op code) */
2074 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
2075 	cmd.opc = SPDK_NVME_OPC_WRITE;
2076 
2077 	spdk_nvmf_request_exec(&req);
2078 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
2079 	CU_ASSERT(qpair.first_fused_req == NULL);
2080 
2081 	/* Valid Compare as FUSE_FIRST, then Compare as FUSE_SECOND (wrong op code) */
2082 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
2083 	cmd.opc = SPDK_NVME_OPC_COMPARE;
2084 
2085 	spdk_nvmf_request_exec(&req);
2086 	CU_ASSERT(qpair.first_fused_req != NULL);
2087 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2088 
2089 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2090 	cmd.opc = SPDK_NVME_OPC_COMPARE;
2091 
2092 	spdk_nvmf_request_exec(&req);
2093 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
2094 	CU_ASSERT(qpair.first_fused_req == NULL);
2095 
2096 	spdk_bit_array_free(&ctrlr.visible_ns);
2097 }
2098 
2099 static void
2100 test_multi_async_event_reqs(void)
2101 {
2102 	struct spdk_nvmf_subsystem subsystem = {};
2103 	struct spdk_nvmf_qpair qpair = {};
2104 	struct spdk_nvmf_ctrlr ctrlr = {};
2105 	struct spdk_nvmf_request req[5] = {};
2106 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2107 	struct spdk_nvmf_ns ns = {};
2108 	union nvmf_h2c_msg cmd[5] = {};
2109 	union nvmf_c2h_msg rsp[5] = {};
2110 
2111 	struct spdk_nvmf_poll_group group = {};
2112 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2113 
2114 	int i;
2115 
2116 	ns_ptrs[0] = &ns;
2117 	subsystem.ns = ns_ptrs;
2118 	subsystem.max_nsid = 1;
2119 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2120 
2121 	ns.opts.nsid = 1;
2122 	group.sgroups = &sgroups;
2123 
2124 	qpair.ctrlr = &ctrlr;
2125 	qpair.group = &group;
2126 	TAILQ_INIT(&qpair.outstanding);
2127 
2128 	ctrlr.subsys = &subsystem;
2129 	ctrlr.vcprop.cc.bits.en = 1;
2130 	ctrlr.thread = spdk_get_thread();
2131 
2132 	for (i = 0; i < 5; i++) {
2133 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2134 		cmd[i].nvme_cmd.nsid = 0;
2135 		cmd[i].nvme_cmd.cid = i;
2136 
2137 		req[i].qpair = &qpair;
2138 		req[i].cmd = &cmd[i];
2139 		req[i].rsp = &rsp[i];
2140 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2141 	}
2142 
2143 	/* The target can store up to SPDK_NVMF_MAX_ASYNC_EVENTS outstanding AER requests */
2144 	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
2145 	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
2146 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2147 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
2148 	}
2149 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2150 
2151 	/* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */
2152 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2153 	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
2154 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2155 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
2156 
2157 	/* Verify that the aer_reqs array stays contiguous when a request in the middle is aborted */
2158 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
2159 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2160 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2161 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
2162 
2163 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
2164 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
2165 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
2166 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
2167 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
2168 
2169 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
2170 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
2171 }
2172 
2173 static void
2174 test_get_ana_log_page_one_ns_per_anagrp(void)
2175 {
2176 #define UT_ANA_DESC_MAX_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
2177 #define UT_ANA_LOG_PAGE_MAX_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_MAX_SIZE)
2178 #define UT_ANA_DESC_SIZE(rgo) (sizeof(struct spdk_nvme_ana_group_descriptor) + (rgo ? 0 : sizeof(uint32_t)))
2179 #define UT_ANA_LOG_PAGE_SIZE(rgo) (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE(rgo))
2180 	uint32_t ana_group[3];
2181 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
2182 	struct spdk_nvmf_ctrlr ctrlr = {};
2183 	enum spdk_nvme_ana_state ana_state[3];
2184 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2185 	struct spdk_nvmf_ns ns[3];
2186 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
2187 	uint64_t offset;
2188 	uint32_t length;
2189 	int i;
2190 	char expected_page[UT_ANA_LOG_PAGE_MAX_SIZE] = {0};
2191 	char actual_page[UT_ANA_LOG_PAGE_MAX_SIZE] = {0};
2192 	struct iovec iov, iovs[2];
2193 	struct spdk_nvme_ana_page *ana_hdr;
2194 	char _ana_desc[UT_ANA_DESC_MAX_SIZE];
2195 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2196 	uint32_t rgo;
2197 
2198 	subsystem.ns = ns_arr;
2199 	subsystem.max_nsid = 3;
2200 	for (i = 0; i < 3; i++) {
2201 		subsystem.ana_group[i] = 1;
2202 	}
2203 	ctrlr.subsys = &subsystem;
2204 	ctrlr.listener = &listener;
2205 
2206 	for (i = 0; i < 3; i++) {
2207 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2208 	}
2209 
2210 	for (i = 0; i < 3; i++) {
2211 		ns_arr[i]->nsid = i + 1;
2212 		ns_arr[i]->anagrpid = i + 1;
2213 	}
2214 
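	/* RGO (Return Groups Only) flag of the ANA Get Log Page request: when
	 * set, each ANA group descriptor reports zero namespaces and omits the
	 * NSID list.
	 */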
2215 	for (rgo = 0; rgo <= 1; rgo++) {
2216 		memset(expected_page, 0, sizeof(expected_page));
2217 		memset(actual_page, 0, sizeof(actual_page));
2218 
2219 		/* create expected page */
2220 		ana_hdr = (void *)&expected_page[0];
2221 		ana_hdr->num_ana_group_desc = 3;
2222 		ana_hdr->change_count = 0;
2223 
2224 		/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2225 		ana_desc = (void *)_ana_desc;
2226 		offset = sizeof(struct spdk_nvme_ana_page);
2227 
2228 		for (i = 0; i < 3; i++) {
2229 			memset(ana_desc, 0, UT_ANA_DESC_MAX_SIZE);
2230 			ana_desc->ana_group_id = ns_arr[i]->nsid;
2231 			ana_desc->num_of_nsid = rgo ? 0 : 1;
2232 			ana_desc->change_count = 0;
2233 			ana_desc->ana_state = ctrlr.listener->ana_state[i];
2234 			if (!rgo) {
2235 				ana_desc->nsid[0] = ns_arr[i]->nsid;
2236 			}
2237 			memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE(rgo));
2238 			offset += UT_ANA_DESC_SIZE(rgo);
2239 		}
2240 
2241 		/* read entire actual log page */
2242 		offset = 0;
2243 		while (offset < UT_ANA_LOG_PAGE_MAX_SIZE) {
2244 			length = spdk_min(16, UT_ANA_LOG_PAGE_MAX_SIZE - offset);
2245 			iov.iov_base = &actual_page[offset];
2246 			iov.iov_len = length;
2247 			nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0, rgo);
2248 			offset += length;
2249 		}
2250 
2251 		/* compare expected page and actual page */
2252 		CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_MAX_SIZE) == 0);
2253 
2254 		memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_MAX_SIZE);
2255 		offset = 0;
2256 		iovs[0].iov_base = &actual_page[offset];
2257 		iovs[0].iov_len = UT_ANA_LOG_PAGE_MAX_SIZE - UT_ANA_DESC_MAX_SIZE + 4;
2258 		offset += UT_ANA_LOG_PAGE_MAX_SIZE - UT_ANA_DESC_MAX_SIZE + 4;
2259 		iovs[1].iov_base = &actual_page[offset];
2260 		iovs[1].iov_len = UT_ANA_LOG_PAGE_MAX_SIZE - offset;
2261 		nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_MAX_SIZE, 0, rgo);
2262 
2263 		CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_MAX_SIZE) == 0);
2264 	}
2265 
2266 #undef UT_ANA_DESC_SIZE
2267 #undef UT_ANA_LOG_PAGE_SIZE
2268 #undef UT_ANA_DESC_MAX_SIZE
2269 #undef UT_ANA_LOG_PAGE_MAX_SIZE
2270 }
2271 
2272 static void
2273 test_get_ana_log_page_multi_ns_per_anagrp(void)
2274 {
2275 #define UT_ANA_LOG_PAGE_SIZE(rgo)	(sizeof(struct spdk_nvme_ana_page) +	\
2276 					 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
2277 					 (rgo ? 0 : (sizeof(uint32_t) * 5)))
2278 #define UT_ANA_LOG_PAGE_MAX_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
2279 					 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
2280 					 sizeof(uint32_t) * 5)
2281 	struct spdk_nvmf_ns ns[5];
2282 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
2283 	uint32_t ana_group[5] = {0};
2284 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
2285 	enum spdk_nvme_ana_state ana_state[5];
2286 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
2287 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
2288 	char expected_page[UT_ANA_LOG_PAGE_MAX_SIZE] = {0};
2289 	char actual_page[UT_ANA_LOG_PAGE_MAX_SIZE] = {0};
2290 	struct iovec iov, iovs[2];
2291 	struct spdk_nvme_ana_page *ana_hdr;
2292 	char _ana_desc[UT_ANA_LOG_PAGE_MAX_SIZE];
2293 	struct spdk_nvme_ana_group_descriptor *ana_desc;
2294 	uint64_t offset;
2295 	uint32_t length;
2296 	int i;
2297 	uint32_t rgo;
2298 
2299 	subsystem.max_nsid = 5;
2300 	subsystem.ana_group[1] = 3;
2301 	subsystem.ana_group[2] = 2;
2302 	for (i = 0; i < 5; i++) {
2303 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2304 	}
2305 
2306 	for (i = 0; i < 5; i++) {
2307 		ns_arr[i]->nsid = i + 1;
2308 	}
2309 	ns_arr[0]->anagrpid = 2;
2310 	ns_arr[1]->anagrpid = 3;
2311 	ns_arr[2]->anagrpid = 2;
2312 	ns_arr[3]->anagrpid = 3;
2313 	ns_arr[4]->anagrpid = 2;
2314 
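	/* Namespaces 1, 3 and 5 belong to ANA group 2 and namespaces 2 and 4 to
	 * ANA group 3, so the log page carries two descriptors.
	 */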
2315 	for (rgo = 0; rgo <= 1; rgo++) {
2316 		memset(expected_page, 0, sizeof(expected_page));
2317 		memset(actual_page, 0, sizeof(actual_page));
2318 
2319 		/* create expected page */
2320 		ana_hdr = (void *)&expected_page[0];
2321 		ana_hdr->num_ana_group_desc = 2;
2322 		ana_hdr->change_count = 0;
2323 
2324 		/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
2325 		ana_desc = (void *)_ana_desc;
2326 		offset = sizeof(struct spdk_nvme_ana_page);
2327 
2328 		memset(_ana_desc, 0, sizeof(_ana_desc));
2329 		ana_desc->ana_group_id = 2;
2330 		ana_desc->num_of_nsid = rgo ? 0 : 3;
2331 		ana_desc->change_count = 0;
2332 		ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2333 		if (!rgo) {
2334 			ana_desc->nsid[0] = 1;
2335 			ana_desc->nsid[1] = 3;
2336 			ana_desc->nsid[2] = 5;
2337 		}
2338 		memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2339 		       (rgo ? 0 : (sizeof(uint32_t) * 3)));
2340 		offset += sizeof(struct spdk_nvme_ana_group_descriptor) + (rgo ? 0 : (sizeof(uint32_t) * 3));
2341 
2342 		memset(_ana_desc, 0, sizeof(_ana_desc));
2343 		ana_desc->ana_group_id = 3;
2344 		ana_desc->num_of_nsid = rgo ? 0 : 2;
2345 		ana_desc->change_count = 0;
2346 		ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
2347 		if (!rgo) {
2348 			ana_desc->nsid[0] = 2;
2349 			ana_desc->nsid[1] = 4;
2350 		}
2351 		memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
2352 		       (rgo ? 0 : (sizeof(uint32_t) * 2)));
2353 
2354 		/* read entire actual log page, and compare expected page and actual page. */
2355 		offset = 0;
2356 		while (offset < UT_ANA_LOG_PAGE_MAX_SIZE) {
2357 			length = spdk_min(16, UT_ANA_LOG_PAGE_MAX_SIZE - offset);
2358 			iov.iov_base = &actual_page[offset];
2359 			iov.iov_len = length;
2360 			nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0, rgo);
2361 			offset += length;
2362 		}
2363 
2364 		CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_MAX_SIZE) == 0);
2365 
2366 		memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_MAX_SIZE);
2367 		offset = 0;
2368 		iovs[0].iov_base = &actual_page[offset];
2369 		iovs[0].iov_len = UT_ANA_LOG_PAGE_MAX_SIZE - sizeof(uint32_t) * 5;
2370 		offset += UT_ANA_LOG_PAGE_MAX_SIZE - sizeof(uint32_t) * 5;
2371 		iovs[1].iov_base = &actual_page[offset];
2372 		iovs[1].iov_len = sizeof(uint32_t) * 5;
2373 		nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_MAX_SIZE, 0, rgo);
2374 
2375 		CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_MAX_SIZE) == 0);
2376 	}
2377 
2378 #undef UT_ANA_LOG_PAGE_SIZE
2379 #undef UT_ANA_LOG_PAGE_MAX_SIZE
2380 }

2381 static void
2382 test_multi_async_events(void)
2383 {
2384 	struct spdk_nvmf_subsystem subsystem = {};
2385 	struct spdk_nvmf_qpair qpair = {};
2386 	struct spdk_nvmf_ctrlr ctrlr = {};
2387 	struct spdk_nvmf_request req[4] = {};
2388 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2389 	struct spdk_nvmf_ns ns = {};
2390 	union nvmf_h2c_msg cmd[4] = {};
2391 	union nvmf_c2h_msg rsp[4] = {};
2392 	union spdk_nvme_async_event_completion event = {};
2393 	struct spdk_nvmf_poll_group group = {};
2394 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2395 	int i;
2396 
2397 	ns_ptrs[0] = &ns;
2398 	subsystem.ns = ns_ptrs;
2399 	subsystem.max_nsid = 1;
2400 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2401 
2402 	ns.opts.nsid = 1;
2403 	group.sgroups = &sgroups;
2404 
2405 	qpair.ctrlr = &ctrlr;
2406 	qpair.group = &group;
2407 	TAILQ_INIT(&qpair.outstanding);
2408 
2409 	ctrlr.subsys = &subsystem;
2410 	ctrlr.vcprop.cc.bits.en = 1;
2411 	ctrlr.thread = spdk_get_thread();
2412 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2413 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2414 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2415 	init_pending_async_events(&ctrlr);
2416 
2417 	/* The target queues pending events when there is no outstanding AER request */
2418 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2419 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2420 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2421 
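	/* The first three AERs complete immediately with the queued events;
	 * the fourth has nothing to report and stays outstanding.
	 */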
2422 	for (i = 0; i < 4; i++) {
2423 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2424 		cmd[i].nvme_cmd.nsid = 0;
2425 		cmd[i].nvme_cmd.cid = i;
2426 
2427 		req[i].qpair = &qpair;
2428 		req[i].cmd = &cmd[i];
2429 		req[i].rsp = &rsp[i];
2430 
2431 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2432 
2433 		sgroups.mgmt_io_outstanding = 1;
2434 		if (i < 3) {
2435 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2436 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2437 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2438 		} else {
2439 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2440 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2441 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2442 		}
2443 	}
2444 
2445 	event.raw = rsp[0].nvme_cpl.cdw0;
2446 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2447 	event.raw = rsp[1].nvme_cpl.cdw0;
2448 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2449 	event.raw = rsp[2].nvme_cpl.cdw0;
2450 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2451 
2452 	cleanup_pending_async_events(&ctrlr);
2453 }
2454 
2455 static void
2456 test_rae(void)
2457 {
2458 	struct spdk_nvmf_subsystem subsystem = {};
2459 	struct spdk_nvmf_qpair qpair = {};
2460 	struct spdk_nvmf_ctrlr ctrlr = {};
2461 	struct spdk_nvmf_request req[3] = {};
2462 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2463 	struct spdk_nvmf_ns ns = {};
2464 	union nvmf_h2c_msg cmd[3] = {};
2465 	union nvmf_c2h_msg rsp[3] = {};
2466 	union spdk_nvme_async_event_completion event = {};
2467 	struct spdk_nvmf_poll_group group = {};
2468 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2469 	int i;
2470 	char data[4096];
2471 
2472 	ns_ptrs[0] = &ns;
2473 	subsystem.ns = ns_ptrs;
2474 	subsystem.max_nsid = 1;
2475 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2476 
2477 	ns.opts.nsid = 1;
2478 	group.sgroups = &sgroups;
2479 
2480 	qpair.ctrlr = &ctrlr;
2481 	qpair.group = &group;
2482 	TAILQ_INIT(&qpair.outstanding);
2483 
2484 	ctrlr.subsys = &subsystem;
2485 	ctrlr.vcprop.cc.bits.en = 1;
2486 	ctrlr.thread = spdk_get_thread();
2487 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2488 	init_pending_async_events(&ctrlr);
2489 
2490 	/* The target queues pending events when there is no outstanding AER request */
2491 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2492 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2493 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2494 	/* Only one event is queued until RAE is cleared */
2495 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2496 
2497 	req[0].qpair = &qpair;
2498 	req[0].cmd = &cmd[0];
2499 	req[0].rsp = &rsp[0];
2500 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2501 	cmd[0].nvme_cmd.nsid = 0;
2502 	cmd[0].nvme_cmd.cid = 0;
2503 
2504 	for (i = 1; i < 3; i++) {
2505 		req[i].qpair = &qpair;
2506 		req[i].cmd = &cmd[i];
2507 		req[i].rsp = &rsp[i];
2508 		req[i].length = sizeof(data);
2509 		SPDK_IOV_ONE(req[i].iov, &req[i].iovcnt, &data, req[i].length);
2510 
2511 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2512 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2513 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2514 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2515 			spdk_nvme_bytes_to_numd(req[i].length);
2516 		cmd[i].nvme_cmd.cid = i;
2517 	}
2518 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2519 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
2520 
2521 	/* consume the pending event */
2522 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2523 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2524 	event.raw = rsp[0].nvme_cpl.cdw0;
2525 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2526 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2527 
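	/* RAE (Retain Asynchronous Event): when set in Get Log Page, reading the
	 * log does not clear the event condition, so no new event of this type
	 * is reported until the page is read again with RAE cleared.
	 */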
2528 	/* get log with RAE set */
2529 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2530 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2531 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2532 
2533 	/* No new event is generated until RAE is cleared */
2534 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2535 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2536 
2537 	/* get log with RAE clear */
2538 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2539 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2540 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2541 
2542 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2543 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2544 
2545 	cleanup_pending_async_events(&ctrlr);
2546 }
2547 
2548 static void
2549 test_nvmf_ctrlr_create_destruct(void)
2550 {
2551 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2552 	struct spdk_nvmf_poll_group group = {};
2553 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2554 	struct spdk_nvmf_transport transport = {};
2555 	struct spdk_nvmf_transport_ops tops = {};
2556 	struct spdk_nvmf_subsystem subsystem = {};
2557 	struct spdk_nvmf_ns *ns_arr[1] = { NULL };
2558 	struct spdk_nvmf_request req = {};
2559 	struct spdk_nvmf_qpair qpair = {};
2560 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2561 	struct spdk_nvmf_tgt tgt = {};
2562 	union nvmf_h2c_msg cmd = {};
2563 	union nvmf_c2h_msg rsp = {};
2564 	const uint8_t hostid[16] = {
2565 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2566 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2567 	};
2568 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2569 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2570 
2571 	group.thread = spdk_get_thread();
2572 	transport.ops = &tops;
2573 	transport.opts.max_aq_depth = 32;
2574 	transport.opts.max_queue_depth = 64;
2575 	transport.opts.max_qpairs_per_ctrlr = 3;
2576 	transport.opts.dif_insert_or_strip = true;
2577 	transport.tgt = &tgt;
2578 	qpair.transport = &transport;
2579 	qpair.group = &group;
2580 	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
2581 	TAILQ_INIT(&qpair.outstanding);
2582 
2583 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
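	/* A CNTLID of 0xFFFF in the Connect data requests that the controller ID
	 * be allocated dynamically by the target.
	 */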
2584 	connect_data.cntlid = 0xFFFF;
2585 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2586 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2587 
2588 	subsystem.thread = spdk_get_thread();
2589 	subsystem.id = 1;
2590 	TAILQ_INIT(&subsystem.ctrlrs);
2591 	subsystem.tgt = &tgt;
2592 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2593 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2594 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2595 	subsystem.ns = ns_arr;
2596 
2597 	group.sgroups = sgroups;
2598 
2599 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2600 	cmd.connect_cmd.cid = 1;
2601 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2602 	cmd.connect_cmd.recfmt = 0;
2603 	cmd.connect_cmd.qid = 0;
2604 	cmd.connect_cmd.sqsize = 31;
2605 	cmd.connect_cmd.cattr = 0;
2606 	cmd.connect_cmd.kato = 120000;
2607 
2608 	req.qpair = &qpair;
2609 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2610 	req.length = sizeof(connect_data);
2611 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
2612 	req.cmd = &cmd;
2613 	req.rsp = &rsp;
2614 
2615 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2616 	sgroups[subsystem.id].mgmt_io_outstanding++;
2617 
2618 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base);
2619 	poll_threads();
2620 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2621 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2622 	CU_ASSERT(ctrlr->subsys == &subsystem);
2623 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2624 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2625 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2626 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2627 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2628 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2629 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2630 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
2631 	CU_ASSERT(!strncmp((void *)&ctrlr->hostid, hostid, 16));
2632 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2633 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2634 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2635 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
2636 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2637 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2638 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2639 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2640 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2641 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2642 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2643 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2644 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2645 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2646 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2647 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2648 
2649 	ctrlr->in_destruct = true;
2650 	nvmf_ctrlr_destruct(ctrlr);
2651 	poll_threads();
2652 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2653 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2654 }
2655 
2656 static void
2657 test_nvmf_ctrlr_use_zcopy(void)
2658 {
2659 	struct spdk_nvmf_subsystem subsystem = {};
2660 	struct spdk_nvmf_transport transport = {};
2661 	struct spdk_nvmf_request req = {};
2662 	struct spdk_nvmf_qpair qpair = {};
2663 	struct spdk_nvmf_ctrlr ctrlr = {};
2664 	union nvmf_h2c_msg cmd = {};
2665 	struct spdk_nvmf_ns ns = {};
2666 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2667 	struct spdk_bdev bdev = {};
2668 	struct spdk_nvmf_poll_group group = {};
2669 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2670 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2671 	struct spdk_io_channel io_ch = {};
2672 	int opc;
2673 
2674 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2675 	ns.bdev = &bdev;
2676 
2677 	subsystem.id = 0;
2678 	subsystem.max_nsid = 1;
2679 	subsys_ns[0] = &ns;
2680 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2681 
2682 	ctrlr.subsys = &subsystem;
2683 	ctrlr.visible_ns = spdk_bit_array_create(1);
2684 	spdk_bit_array_set(ctrlr.visible_ns, 0);
2685 
2686 	transport.opts.zcopy = true;
2687 
2688 	qpair.ctrlr = &ctrlr;
2689 	qpair.group = &group;
2690 	qpair.qid = 1;
2691 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
2692 	qpair.transport = &transport;
2693 
2694 	group.thread = spdk_get_thread();
2695 	group.num_sgroups = 1;
2696 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2697 	sgroups.num_ns = 1;
2698 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2699 	ns_info.channel = &io_ch;
2700 	sgroups.ns_info = &ns_info;
2701 	TAILQ_INIT(&sgroups.queued);
2702 	group.sgroups = &sgroups;
2703 	TAILQ_INIT(&qpair.outstanding);
2704 
2705 	req.qpair = &qpair;
2706 	req.cmd = &cmd;
2707 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2708 
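	/* nvmf_ctrlr_use_zcopy() only allows zcopy for READ/WRITE on an I/O
	 * queue, with a valid namespace that supports zcopy, when the transport
	 * enables it, and never for fused commands.
	 */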
2709 	/* Admin queue */
2710 	qpair.qid = 0;
2711 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2712 	qpair.qid = 1;
2713 
2714 	/* Invalid Opcodes */
2715 	for (opc = 0; opc <= 255; opc++) {
2716 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2717 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2718 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2719 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2720 		}
2721 	}
2722 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2723 
2724 	/* Fused WRITE */
2725 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2726 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2727 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2728 
2729 	/* NSID with no backing namespace/bdev */
2730 	cmd.nvme_cmd.nsid = 4;
2731 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2732 	cmd.nvme_cmd.nsid = 1;
2733 
2734 	/* Zcopy not supported by the namespace */
2735 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2736 	ns.zcopy = true;
2737 
2738 	/* ZCOPY disabled on transport level */
2739 	transport.opts.zcopy = false;
2740 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2741 	transport.opts.zcopy = true;
2742 
2743 	/* Success */
2744 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2745 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2746 
2747 	spdk_bit_array_free(&ctrlr.visible_ns);
2748 }
2749 
2750 static void
2751 qpair_state_change_done(void *cb_arg, int status)
2752 {
2753 }
2754 
2755 static void
2756 test_spdk_nvmf_request_zcopy_start(void)
2757 {
2758 	struct spdk_nvmf_request req = {};
2759 	struct spdk_nvmf_qpair qpair = {};
2760 	struct spdk_nvmf_transport transport = {};
2761 	struct spdk_nvme_cmd cmd = {};
2762 	union nvmf_c2h_msg rsp = {};
2763 	struct spdk_nvmf_ctrlr ctrlr = {};
2764 	struct spdk_nvmf_subsystem subsystem = {};
2765 	struct spdk_nvmf_ns ns = {};
2766 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2767 	enum spdk_nvme_ana_state ana_state[1];
2768 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2769 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2770 
2771 	struct spdk_nvmf_poll_group group = {};
2772 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2773 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2774 	struct spdk_io_channel io_ch = {};
2775 
2776 	ns.bdev = &bdev;
2777 	ns.zcopy = true;
2778 	ns.anagrpid = 1;
2779 
2780 	subsystem.id = 0;
2781 	subsystem.max_nsid = 1;
2782 	subsys_ns[0] = &ns;
2783 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2784 
2785 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2786 
2787 	/* Enable controller */
2788 	ctrlr.vcprop.cc.bits.en = 1;
2789 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2790 	ctrlr.listener = &listener;
2791 	ctrlr.visible_ns = spdk_bit_array_create(1);
2792 	spdk_bit_array_set(ctrlr.visible_ns, 0);
2793 
2794 	transport.opts.zcopy = true;
2795 
2796 	group.thread = spdk_get_thread();
2797 	group.num_sgroups = 1;
2798 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2799 	sgroups.num_ns = 1;
2800 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2801 	ns_info.channel = &io_ch;
2802 	sgroups.ns_info = &ns_info;
2803 	TAILQ_INIT(&sgroups.queued);
2804 	group.sgroups = &sgroups;
2805 	TAILQ_INIT(&qpair.outstanding);
2806 
2807 	qpair.ctrlr = &ctrlr;
2808 	qpair.group = &group;
2809 	qpair.transport = &transport;
2810 	qpair.qid = 1;
2811 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
2812 
2813 	cmd.nsid = 1;
2814 
2815 	req.qpair = &qpair;
2816 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2817 	req.rsp = &rsp;
2818 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2819 	cmd.opc = SPDK_NVME_OPC_READ;
2820 
2821 	/* Fail because no controller */
2822 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2823 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2824 	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
2825 	qpair.ctrlr = NULL;
2826 	spdk_nvmf_request_zcopy_start(&req);
2827 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2828 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2829 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
2830 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
2831 	qpair.ctrlr = &ctrlr;
2832 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2833 
2834 	/* Fail because bad NSID */
2835 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2836 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2837 	cmd.nsid = 0;
2838 	spdk_nvmf_request_zcopy_start(&req);
2839 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2840 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2841 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2842 	cmd.nsid = 1;
2843 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2844 
2845 	/* Fail because bad Channel */
2846 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2847 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2848 	ns_info.channel = NULL;
2849 	spdk_nvmf_request_zcopy_start(&req);
2850 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2851 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2852 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2853 	ns_info.channel = &io_ch;
2854 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2855 
2856 	/* Queue the request because NSID is not active */
2857 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2858 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2859 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2860 	spdk_nvmf_request_zcopy_start(&req);
2861 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
2862 	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
2863 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2864 	TAILQ_REMOVE(&sgroups.queued, &req, link);
2865 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2866 
2867 	/* Fail because QPair is not active */
2868 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2869 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2870 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2871 	qpair.state_cb = qpair_state_change_done;
2872 	spdk_nvmf_request_zcopy_start(&req);
2873 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
2874 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
2875 	qpair.state_cb = NULL;
2876 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2877 
2878 	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails */
2879 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2880 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
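	/* SLBA equal to blockcnt with 101 blocks (NLB is 0-based) lands past the
	 * end of the 100-block bdev, so the zcopy start should fail.
	 */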
2881 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2882 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2883 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
2884 	spdk_nvmf_request_zcopy_start(&req);
2885 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2886 	cmd.cdw10 = 0;
2887 	cmd.cdw12 = 0;
2888 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2889 
2890 	/* Success */
2891 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2892 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2893 	spdk_nvmf_request_zcopy_start(&req);
2894 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2895 
2896 	spdk_bit_array_free(&ctrlr.visible_ns);
2897 }
2898 
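/*
 * Walk a READ through the full zcopy path: nvmf_ctrlr_use_zcopy() selects zcopy,
 * spdk_nvmf_request_zcopy_start() acquires the bdev zcopy buffer and bumps the
 * outstanding counters, and spdk_nvmf_request_zcopy_end() releases them again.
 */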
2899 static void
2900 test_zcopy_read(void)
2901 {
2902 	struct spdk_nvmf_request req = {};
2903 	struct spdk_nvmf_qpair qpair = {};
2904 	struct spdk_nvmf_transport transport = {};
2905 	struct spdk_nvme_cmd cmd = {};
2906 	union nvmf_c2h_msg rsp = {};
2907 	struct spdk_nvmf_ctrlr ctrlr = {};
2908 	struct spdk_nvmf_subsystem subsystem = {};
2909 	struct spdk_nvmf_ns ns = {};
2910 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2911 	enum spdk_nvme_ana_state ana_state[1];
2912 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2913 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2914 
2915 	struct spdk_nvmf_poll_group group = {};
2916 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2917 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2918 	struct spdk_io_channel io_ch = {};
2919 
2920 	ns.bdev = &bdev;
2921 	ns.zcopy = true;
2922 	ns.anagrpid = 1;
2923 
2924 	subsystem.id = 0;
2925 	subsystem.max_nsid = 1;
2926 	subsys_ns[0] = &ns;
2927 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2928 
2929 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2930 
2931 	/* Enable controller */
2932 	ctrlr.vcprop.cc.bits.en = 1;
2933 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2934 	ctrlr.listener = &listener;
2935 	ctrlr.visible_ns = spdk_bit_array_create(1);
2936 	spdk_bit_array_set(ctrlr.visible_ns, 0);
2937 
2938 	transport.opts.zcopy = true;
2939 
2940 	group.thread = spdk_get_thread();
2941 	group.num_sgroups = 1;
2942 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2943 	sgroups.num_ns = 1;
2944 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2945 	ns_info.channel = &io_ch;
2946 	sgroups.ns_info = &ns_info;
2947 	TAILQ_INIT(&sgroups.queued);
2948 	group.sgroups = &sgroups;
2949 	TAILQ_INIT(&qpair.outstanding);
2950 
2951 	qpair.ctrlr = &ctrlr;
2952 	qpair.group = &group;
2953 	qpair.transport = &transport;
2954 	qpair.qid = 1;
2955 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
2956 
2957 	cmd.nsid = 1;
2958 
2959 	req.qpair = &qpair;
2960 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2961 	req.rsp = &rsp;
2962 	cmd.opc = SPDK_NVME_OPC_READ;
2963 
2964 	/* Prepare for zcopy */
2965 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2966 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2967 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2968 	CU_ASSERT(ns_info.io_outstanding == 0);
2969 
2970 	/* Perform the zcopy start */
2971 	spdk_nvmf_request_zcopy_start(&req);
2972 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2973 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2974 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2975 	CU_ASSERT(ns_info.io_outstanding == 1);
2976 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2977 
2978 	/* Perform the zcopy end */
2979 	spdk_nvmf_request_zcopy_end(&req, false);
2980 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2981 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2982 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2983 	CU_ASSERT(ns_info.io_outstanding == 0);
2984 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2985 
2986 	spdk_bit_array_free(&ctrlr.visible_ns);
2987 }
2988 
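/*
 * Same flow as test_zcopy_read, but for a WRITE: the zcopy start returns the
 * write bdev_io and the zcopy end is called with commit set to true.
 */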
2989 static void
2990 test_zcopy_write(void)
2991 {
2992 	struct spdk_nvmf_request req = {};
2993 	struct spdk_nvmf_qpair qpair = {};
2994 	struct spdk_nvmf_transport transport = {};
2995 	struct spdk_nvme_cmd cmd = {};
2996 	union nvmf_c2h_msg rsp = {};
2997 	struct spdk_nvmf_ctrlr ctrlr = {};
2998 	struct spdk_nvmf_subsystem subsystem = {};
2999 	struct spdk_nvmf_ns ns = {};
3000 	struct spdk_nvmf_ns *subsys_ns[1] = {};
3001 	enum spdk_nvme_ana_state ana_state[1];
3002 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
3003 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
3004 
3005 	struct spdk_nvmf_poll_group group = {};
3006 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
3007 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
3008 	struct spdk_io_channel io_ch = {};
3009 
3010 	ns.bdev = &bdev;
3011 	ns.zcopy = true;
3012 	ns.anagrpid = 1;
3013 
3014 	subsystem.id = 0;
3015 	subsystem.max_nsid = 1;
3016 	subsys_ns[0] = &ns;
3017 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
3018 
3019 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
3020 
3021 	/* Enable controller */
3022 	ctrlr.vcprop.cc.bits.en = 1;
3023 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
3024 	ctrlr.listener = &listener;
3025 	ctrlr.visible_ns = spdk_bit_array_create(1);
3026 	spdk_bit_array_set(ctrlr.visible_ns, 0);
3027 
3028 	transport.opts.zcopy = true;
3029 
3030 	group.thread = spdk_get_thread();
3031 	group.num_sgroups = 1;
3032 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
3033 	sgroups.num_ns = 1;
3034 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
3035 	ns_info.channel = &io_ch;
3036 	sgroups.ns_info = &ns_info;
3037 	TAILQ_INIT(&sgroups.queued);
3038 	group.sgroups = &sgroups;
3039 	TAILQ_INIT(&qpair.outstanding);
3040 
3041 	qpair.ctrlr = &ctrlr;
3042 	qpair.group = &group;
3043 	qpair.transport = &transport;
3044 	qpair.qid = 1;
3045 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
3046 
3047 	cmd.nsid = 1;
3048 
3049 	req.qpair = &qpair;
3050 	req.cmd = (union nvmf_h2c_msg *)&cmd;
3051 	req.rsp = &rsp;
3052 	cmd.opc = SPDK_NVME_OPC_WRITE;
3053 
3054 	/* Prepare for zcopy */
3055 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
3056 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
3057 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
3058 	CU_ASSERT(ns_info.io_outstanding == 0);
3059 
3060 	/* Perform the zcopy start */
3061 	spdk_nvmf_request_zcopy_start(&req);
3062 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
3063 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
3064 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
3065 	CU_ASSERT(ns_info.io_outstanding == 1);
3066 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
3067 
3068 	/* Perform the zcopy end */
3069 	spdk_nvmf_request_zcopy_end(&req, true);
3070 	CU_ASSERT(req.zcopy_bdev_io == NULL);
3071 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
3072 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
3073 	CU_ASSERT(ns_info.io_outstanding == 0);
3074 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
3075 
3076 	spdk_bit_array_free(&ctrlr.visible_ns);
3077 }
3078 
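/*
 * Exercise nvmf_property_set()/nvmf_property_get(): unsupported or read-only
 * offsets fail with an invalid-parameter status, cc round-trips with a 4-byte
 * access, and a 4-byte access to the 8-byte asq property returns only the
 * lower dword.
 */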
3079 static void
3080 test_nvmf_property_set(void)
3081 {
3082 	int rc;
3083 	struct spdk_nvmf_request req = {};
3084 	struct spdk_nvmf_qpair qpair = {};
3085 	struct spdk_nvmf_ctrlr ctrlr = {};
3086 	union nvmf_h2c_msg cmd = {};
3087 	union nvmf_c2h_msg rsp = {};
3088 
3089 	req.qpair = &qpair;
3090 	qpair.ctrlr = &ctrlr;
3091 	req.cmd = &cmd;
3092 	req.rsp = &rsp;
3093 
3094 	/* Invalid parameters: vs is read-only and intms is not a supported fabrics property */
3095 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
3096 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
3097 
3098 	rc = nvmf_property_set(&req);
3099 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3100 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
3101 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
3102 
3103 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);
3104 
3105 	rc = nvmf_property_get(&req);
3106 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3107 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
3108 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
3109 
3110 	/* Set cc with a matching 4-byte property size */
3111 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
3112 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);
3113 
3114 	rc = nvmf_property_set(&req);
3115 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3116 
3117 	/* Emulate cc data */
3118 	ctrlr.vcprop.cc.raw = 0xDEADBEEF;
3119 
3120 	rc = nvmf_property_get(&req);
3121 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3122 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);
3123 
3124 	/* Set the 8-byte asq property with a smaller, 4-byte access */
3125 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
3126 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
3127 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);
3128 
3129 	rc = nvmf_property_set(&req);
3130 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3131 
3132 	/* Emulate asq data */
3133 	ctrlr.vcprop.asq = 0xAADDADBEEF;
3134 
3135 	rc = nvmf_property_get(&req);
3136 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3137 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
3138 }
3139 
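/*
 * Get Features (Host Behavior Support): a missing iovec or a payload of the
 * wrong length fails with Invalid Field; a valid request reports the
 * controller's current acre/lbafee settings.
 */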
3140 static void
3141 test_nvmf_ctrlr_get_features_host_behavior_support(void)
3142 {
3143 	int rc;
3144 	struct spdk_nvmf_request req = {};
3145 	struct spdk_nvmf_qpair qpair = {};
3146 	struct spdk_nvmf_ctrlr ctrlr = {};
3147 	struct spdk_nvme_host_behavior behavior = {};
3148 	union nvmf_h2c_msg cmd = {};
3149 	union nvmf_c2h_msg rsp = {};
3150 
3151 	qpair.ctrlr = &ctrlr;
3152 	req.qpair = &qpair;
3153 	req.cmd = &cmd;
3154 	req.rsp = &rsp;
3155 
3156 	/* Invalid data */
3157 	req.length = sizeof(struct spdk_nvme_host_behavior);
3158 	req.iovcnt = 0;
3159 
3160 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
3161 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3162 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3163 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3164 
3165 	/* Wrong structure length */
3166 	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;
3167 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length);
3168 
3169 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
3170 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3171 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3172 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3173 
3174 	/* Get Features Host Behavior Support Success */
3175 	req.length = sizeof(struct spdk_nvme_host_behavior);
3176 	SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length);
3177 
3178 	ctrlr.acre_enabled = true;
3179 	ctrlr.lbafee_enabled = true;
3180 	behavior.acre = false;
3181 	behavior.lbafee = false;
3182 
3183 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
3184 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3185 	CU_ASSERT(behavior.acre == true);
3186 	CU_ASSERT(behavior.lbafee == true);
3187 }
3188 
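/*
 * Set Features (Host Behavior Support): bad iovcnt/iov_len and out-of-range
 * acre/lbafee values fail with Invalid Field, while valid payloads update
 * ctrlr.acre_enabled and ctrlr.lbafee_enabled.
 */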
3189 static void
3190 test_nvmf_ctrlr_set_features_host_behavior_support(void)
3191 {
3192 	int rc;
3193 	struct spdk_nvmf_request req = {};
3194 	struct spdk_nvmf_qpair qpair = {};
3195 	struct spdk_nvmf_ctrlr ctrlr = {};
3196 	struct spdk_nvme_host_behavior host_behavior = {};
3197 	union nvmf_h2c_msg cmd = {};
3198 	union nvmf_c2h_msg rsp = {};
3199 
3200 	qpair.ctrlr = &ctrlr;
3201 	req.qpair = &qpair;
3202 	req.cmd = &cmd;
3203 	req.rsp = &rsp;
3204 	req.iov[0].iov_base = &host_behavior;
3205 	req.iov[0].iov_len = sizeof(host_behavior);
3206 
3207 	/* Invalid iovcnt */
3208 	req.iovcnt = 0;
3209 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3210 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3211 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3212 
3213 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3214 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3215 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3216 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3217 
3218 	/* Invalid iov_len */
3219 	req.iovcnt = 1;
3220 	req.iov[0].iov_len = 0;
3221 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3222 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3223 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3224 
3225 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3226 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3227 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3228 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3229 
3230 	/* acre is false but lbafee is true */
3231 	host_behavior.acre = 0;
3232 	host_behavior.lbafee = 1;
3233 	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
3234 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3235 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3236 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3237 
3238 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3239 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3240 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3241 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
3242 	CU_ASSERT(ctrlr.acre_enabled == false);
3243 	CU_ASSERT(ctrlr.lbafee_enabled == true);
3244 
3245 	/* acre is true but lbafee is false */
3246 	host_behavior.acre = 1;
3247 	host_behavior.lbafee = 0;
3248 	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
3249 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3250 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3251 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3252 
3253 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3254 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3255 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3256 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
3257 	CU_ASSERT(ctrlr.acre_enabled == true);
3258 	CU_ASSERT(ctrlr.lbafee_enabled == false);
3259 
3260 	/* Invalid acre */
3261 	host_behavior.acre = 2;
3262 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3263 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3264 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3265 
3266 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3267 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3268 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3269 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3270 
3271 	/* Invalid lbafee */
3272 	host_behavior.lbafee = 3;
3273 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
3274 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3275 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
3276 
3277 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
3278 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
3279 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
3280 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
3281 }
3282 
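/*
 * Per-host namespace visibility: namespaces start hidden, become visible to a
 * controller whose host NQN is in the namespace host list (cold attach), and
 * become visible to every controller once always_visible is set.
 */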
3283 static void
3284 test_nvmf_ctrlr_ns_attachment(void)
3285 {
3286 	struct spdk_nvmf_subsystem subsystem = {};
3287 	struct spdk_nvmf_ns ns1 = {
3288 		.nsid = 1,
3289 		.always_visible = false
3290 	};
3291 	struct spdk_nvmf_ns ns3 = {
3292 		.nsid = 3,
3293 		.always_visible = false
3294 	};
3295 	struct spdk_nvmf_ctrlr ctrlrA = {
3296 		.subsys = &subsystem
3297 	};
3298 	struct spdk_nvmf_ctrlr ctrlrB = {
3299 		.subsys = &subsystem
3300 	};
3301 	struct spdk_nvmf_host *host;
3302 	uint32_t nsid;
3303 
3304 	subsystem.max_nsid = 3;
3305 	subsystem.ns = calloc(subsystem.max_nsid, sizeof(*subsystem.ns));
3306 	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
3307 
3308 	/* nsid = 2 -> unallocated, nsid = 1,3 -> allocated */
3309 	subsystem.ns[0] = &ns1;
3310 	subsystem.ns[2] = &ns3;
3311 
3312 	snprintf(ctrlrA.hostnqn, sizeof(ctrlrA.hostnqn), "nqn.2016-06.io.spdk:host1");
3313 	ctrlrA.visible_ns = spdk_bit_array_create(subsystem.max_nsid);
3314 	SPDK_CU_ASSERT_FATAL(ctrlrA.visible_ns != NULL);
3315 	snprintf(ctrlrB.hostnqn, sizeof(ctrlrB.hostnqn), "nqn.2016-06.io.spdk:host2");
3316 	ctrlrB.visible_ns = spdk_bit_array_create(subsystem.max_nsid);
3317 	SPDK_CU_ASSERT_FATAL(ctrlrB.visible_ns != NULL);
3318 
3319 	/* Do not auto attach and no cold attach of any ctrlr */
3320 	nsid = 1;
3321 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
3322 	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
3323 	nvmf_ctrlr_init_visible_ns(&ctrlrA);
3324 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
3325 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
3326 	nsid = 3;
3327 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
3328 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
3329 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
3330 	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
3331 
3332 	/* Cold attach ctrlrA to namespace 1 */
3333 	nsid = 1;
3334 	host = calloc(1, sizeof(*host));
3335 	SPDK_CU_ASSERT_FATAL(host != NULL);
3336 	snprintf(host->nqn, sizeof(host->nqn), "%s", ctrlrA.hostnqn);
3337 	TAILQ_INSERT_HEAD(&ns1.hosts, host, link);
3338 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host);
3339 	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
3340 	nvmf_ctrlr_init_visible_ns(&ctrlrA);
3341 	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
3342 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
3343 	nsid = 3;
3344 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
3345 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
3346 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host);
3347 	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
3348 
3349 	/* Detach ctrlrA from namespace 1 */
3350 	nsid = 1;
3351 	spdk_bit_array_clear(ctrlrA.visible_ns, nsid - 1);
3352 	TAILQ_REMOVE(&ns1.hosts, host, link);
3353 	free(host);
3354 
3355 	/* Auto attach any ctrlr to namespace 1 via always_visible */
3356 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
3357 	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
3358 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
3359 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
3360 	nsid = 3;
3361 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
3362 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
3363 	ns1.always_visible = true;
3364 	nvmf_ctrlr_init_visible_ns(&ctrlrA);
3365 	nsid = 1;
3366 	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
3367 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
3368 	nsid = 3;
3369 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
3370 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
3371 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
3372 	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
3373 	nvmf_ctrlr_init_visible_ns(&ctrlrB);
3374 	nsid = 1;
3375 	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
3376 	CU_ASSERT(spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
3377 	nsid = 3;
3378 	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
3379 	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
3380 	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
3381 	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
3382 
3383 	spdk_bit_array_free(&ctrlrA.visible_ns);
3384 	spdk_bit_array_free(&ctrlrB.visible_ns);
3385 	free(subsystem.ns);
3386 }
3387 
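/*
 * nvmf_check_qpair_active(): I/O is allowed on an enabled qpair, only CONNECT
 * while connecting, only AUTHENTICATION_SEND/RECV while authenticating, and
 * all commands are rejected in the remaining qpair states.
 */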
3388 static void
3389 test_nvmf_check_qpair_active(void)
3390 {
3391 	union nvmf_c2h_msg rsp = {};
3392 	union nvmf_h2c_msg cmd = {};
3393 	struct spdk_nvmf_qpair qpair = { .outstanding = TAILQ_HEAD_INITIALIZER(qpair.outstanding) };
3394 	struct spdk_nvmf_request req = { .qpair = &qpair, .cmd = &cmd, .rsp = &rsp };
3395 	size_t i;
3396 
3397 	/* qpair is active */
3398 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
3399 	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
3400 	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);
3401 
3402 	/* qpair is connecting - CONNECT is allowed */
3403 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
3404 	cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
3405 	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
3406 	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);
3407 
3408 	/* qpair is connecting - other commands are disallowed */
3409 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
3410 	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
3411 	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false);
3412 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
3413 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
3414 
3415 	/* qpair is authenticating - AUTHENTICATION_SEND is allowed */
3416 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
3417 	cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND;
3418 	qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING;
3419 	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);
3420 
3421 	/* qpair is authenticating - AUTHENTICATION_RECV is allowed */
3422 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
3423 	cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV;
3424 	qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING;
3425 	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);
3426 
3427 	/* qpair is authenticating - other commands are disallowed */
3428 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
3429 	qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING;
3430 	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false);
3431 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_COMMAND_SPECIFIC);
3432 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVMF_FABRIC_SC_AUTH_REQUIRED);
3433 
3434 	/* qpair is in one of the other states - all commands are disallowed */
3435 	int disallowed_states[] = {
3436 		SPDK_NVMF_QPAIR_UNINITIALIZED,
3437 		SPDK_NVMF_QPAIR_DEACTIVATING,
3438 		SPDK_NVMF_QPAIR_ERROR,
3439 	};
3440 	qpair.state_cb = qpair_state_change_done;
3441 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
3442 	for (i = 0; i < SPDK_COUNTOF(disallowed_states); ++i) {
3443 		qpair.state = disallowed_states[i];
3444 		CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false);
3445 		CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
3446 		CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
3447 	}
3448 }
3449 
3450 int
3451 main(int argc, char **argv)
3452 {
3453 	CU_pSuite	suite = NULL;
3454 	unsigned int	num_failures;
3455 
3456 	CU_initialize_registry();
3457 
3458 	suite = CU_add_suite("nvmf", NULL, NULL);
3459 	CU_ADD_TEST(suite, test_get_log_page);
3460 	CU_ADD_TEST(suite, test_process_fabrics_cmd);
3461 	CU_ADD_TEST(suite, test_connect);
3462 	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
3463 	CU_ADD_TEST(suite, test_identify_ns);
3464 	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
3465 	CU_ADD_TEST(suite, test_reservation_write_exclusive);
3466 	CU_ADD_TEST(suite, test_reservation_exclusive_access);
3467 	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
3468 	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
3469 	CU_ADD_TEST(suite, test_reservation_notification_log_page);
3470 	CU_ADD_TEST(suite, test_get_dif_ctx);
3471 	CU_ADD_TEST(suite, test_set_get_features);
3472 	CU_ADD_TEST(suite, test_identify_ctrlr);
3473 	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
3474 	CU_ADD_TEST(suite, test_custom_admin_cmd);
3475 	CU_ADD_TEST(suite, test_fused_compare_and_write);
3476 	CU_ADD_TEST(suite, test_multi_async_event_reqs);
3477 	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
3478 	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
3479 	CU_ADD_TEST(suite, test_multi_async_events);
3480 	CU_ADD_TEST(suite, test_rae);
3481 	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
3482 	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
3483 	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
3484 	CU_ADD_TEST(suite, test_zcopy_read);
3485 	CU_ADD_TEST(suite, test_zcopy_write);
3486 	CU_ADD_TEST(suite, test_nvmf_property_set);
3487 	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
3488 	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);
3489 	CU_ADD_TEST(suite, test_nvmf_ctrlr_ns_attachment);
3490 	CU_ADD_TEST(suite, test_nvmf_check_qpair_active);
3491 
3492 	allocate_threads(1);
3493 	set_thread(0);
3494 
3495 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
3496 	CU_cleanup_registry();
3497 
3498 	free_threads();
3499 
3500 	return num_failures;
3501 }
3502