/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

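/*
 * Pull the code under test (and the multithreaded UT harness) in directly as
 * .c files so that its static functions and globals are visible to the tests.
 */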
#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
};

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

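/*
 * The DEFINE_STUB()/DEFINE_STUB_V() macros from spdk_internal/mock.h expand
 * into mock implementations that return a fixed default value, which a test
 * can override at runtime with MOCK_SET() and restore with MOCK_CLEAR().
 * As a rough sketch (illustrative only, not the exact macro expansion):
 *
 *	struct spdk_nvmf_subsystem *
 *	spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
 *	{
 *		return MOCK_GET(spdk_nvmf_tgt_find_subsystem); // default: NULL
 *	}
 */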
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

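/*
 * The stubs below need real behavior (or are exercised for their side
 * effects), so they are written out by hand instead of using DEFINE_STUB().
 */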
int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

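/*
 * Together, the two helpers above support the standard SPDK active-namespace
 * iteration pattern, e.g. (sketch, assuming a populated subsystem):
 *
 *	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
 *	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
 *		...
 *	}
 */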
bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

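/*
 * Stubbed zcopy start: decode the SLBA (CDW10-11) and the 0's-based NLB field
 * (CDW12 bits 15:00, hence the "+ 1"), range-check against the bdev, and hand
 * back one of the sentinel bdev_io pointers so tests can distinguish the
 * read, write, and failure paths.
 */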
int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

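/*
 * Note on the Get Log Page encoding used below: the transfer size is carried
 * as a 0's-based dword count (NUMD) split across CDW10/CDW11, so
 * spdk_nvme_bytes_to_numd(4096) yields 1023 and the tests store its low
 * 16 bits in NUMDL.
 */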
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = data;
	req.length = sizeof(data);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

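/*
 * CONNECT failures below are reported through the "invalid parameter" status
 * code specific fields: IATTR selects the command (0) or data (1) portion of
 * the capsule, and IPO is the byte offset of the offending field, e.g. SQSIZE
 * at offset 44 of the command, and CNTLID, SUBNQN and HOSTNQN at offsets 16,
 * 256 and 512 of the connect data.
 */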
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* Admin connect (qid = 0) to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Admin connect (qid = 0) to discovery controller with keep-alive-timeout == 0.
	 * In this case a fixed default keep-alive-timeout is applied instead.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

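/*
 * Identify CNS 03h returns a list of namespace identification descriptors,
 * each a 4-byte header (NIDT, NIDL, two reserved bytes) followed by NIDL
 * bytes of payload, terminated by a descriptor with NIDT == 0.  That layout
 * is why the asserts below check buf[0] (type), buf[1] (length) and payloads
 * starting at offset 4 of each descriptor.
 */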
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);
	req.iovcnt = 1;
	req.iov[0].iov_base = req.data;
	req.iov[0].iov_len = req.length;

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

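/*
 * Identify Namespace must distinguish three cases: an active NSID returns
 * the namespace data, an allocated-but-inactive NSID succeeds with an
 * all-zero data buffer, and an invalid NSID fails with Invalid Namespace
 * or Format.
 */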
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

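/*
 * For the Temperature Threshold feature (FID 04h), CDW11 carries TMPSEL in
 * bits 19:16 and THSEL in bits 21:20.  Setting bits 16 and 19 below produces
 * the reserved TMPSEL value 9h, and THSEL = 3h is likewise reserved, so both
 * must fail with Invalid Field in Command.
 */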
static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

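/*
 * Write Exclusive: only the reservation holder may issue media-modifying
 * commands, but any host, registered or not, may still read.
 */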
static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

static void
init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	STAILQ_INIT(&ctrlr->async_events);
}

static void
cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_async_event_completion *event, *event_tmp;

	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
		free(event);
	}
}

static int
num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	int num = 0;
	struct spdk_nvmf_async_event_completion *event;

	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
		num++;
	}
	return num;
}

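/*
 * When a reservation log page becomes available, the AER completion encodes
 * the event in CDW0: the async event type, the event info (reservation log
 * page available) and the log page identifier, which the asserts below
 * unpack through union spdk_nvme_async_event_completion.
 */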
1400 static void
1401 test_reservation_notification_log_page(void)
1402 {
1403 	struct spdk_nvmf_ctrlr ctrlr;
1404 	struct spdk_nvmf_qpair qpair;
1405 	struct spdk_nvmf_ns ns;
1406 	struct spdk_nvmf_request req = {};
1407 	union nvmf_h2c_msg cmd = {};
1408 	union nvmf_c2h_msg rsp = {};
1409 	union spdk_nvme_async_event_completion event = {};
1410 	struct spdk_nvme_reservation_notification_log logs[3];
1411 	struct iovec iov;
1412 
1413 	memset(&ctrlr, 0, sizeof(ctrlr));
1414 	ctrlr.thread = spdk_get_thread();
1415 	TAILQ_INIT(&ctrlr.log_head);
1416 	init_pending_async_events(&ctrlr);
1417 	ns.nsid = 1;
1418 
1419 	/* Test Case: Mask all the reservation notifications */
1420 	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
1421 		  SPDK_NVME_RESERVATION_RELEASED_MASK |
1422 		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
1423 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1424 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1425 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1426 					  SPDK_NVME_RESERVATION_RELEASED);
1427 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1428 					  SPDK_NVME_RESERVATION_PREEMPTED);
1429 	poll_threads();
1430 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
1431 
1432 	/* Test Case: Unmask all the reservation notifications;
1433 	 * 3 log pages are generated and an AER is triggered.
1434 	 */
1435 	ns.mask = 0;
1436 	ctrlr.num_avail_log_pages = 0;
1437 	req.cmd = &cmd;
1438 	req.rsp = &rsp;
1439 	ctrlr.aer_req[0] = &req;
1440 	ctrlr.nr_aer_reqs = 1;
1441 	req.qpair = &qpair;
1442 	TAILQ_INIT(&qpair.outstanding);
1443 	qpair.ctrlr = NULL;
1444 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1445 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
1446 
1447 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1448 					  SPDK_NVME_REGISTRATION_PREEMPTED);
1449 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1450 					  SPDK_NVME_RESERVATION_RELEASED);
1451 	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
1452 					  SPDK_NVME_RESERVATION_PREEMPTED);
1453 	poll_threads();
1454 	event.raw = rsp.nvme_cpl.cdw0;
1455 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
1456 	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
1457 	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
1458 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
1459 
1460 	/* Test Case: Get Log Page to clear the log pages */
1461 	iov.iov_base = &logs[0];
1462 	iov.iov_len = sizeof(logs);
1463 	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
1464 	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
1465 
1466 	cleanup_pending_async_events(&ctrlr);
1467 }
1468 
1469 static void
1470 test_get_dif_ctx(void)
1471 {
1472 	struct spdk_nvmf_subsystem subsystem = {};
1473 	struct spdk_nvmf_request req = {};
1474 	struct spdk_nvmf_qpair qpair = {};
1475 	struct spdk_nvmf_ctrlr ctrlr = {};
1476 	struct spdk_nvmf_ns ns = {};
1477 	struct spdk_nvmf_ns *_ns = NULL;
1478 	struct spdk_bdev bdev = {};
1479 	union nvmf_h2c_msg cmd = {};
1480 	struct spdk_dif_ctx dif_ctx = {};
1481 	bool ret;
1482 
1483 	ctrlr.subsys = &subsystem;
1484 
1485 	qpair.ctrlr = &ctrlr;
1486 
1487 	req.qpair = &qpair;
1488 	req.cmd = &cmd;
1489 
1490 	ns.bdev = &bdev;
1491 
1492 	ctrlr.dif_insert_or_strip = false;
1493 
1494 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1495 	CU_ASSERT(ret == false);
1496 
1497 	ctrlr.dif_insert_or_strip = true;
1498 	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
1499 
1500 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1501 	CU_ASSERT(ret == false);
1502 
1503 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1504 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
1505 
1506 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1507 	CU_ASSERT(ret == false);
1508 
1509 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
1510 
1511 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1512 	CU_ASSERT(ret == false);
1513 
1514 	qpair.qid = 1;
1515 
1516 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1517 	CU_ASSERT(ret == false);
1518 
1519 	cmd.nvme_cmd.nsid = 1;
1520 
1521 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1522 	CU_ASSERT(ret == false);
1523 
1524 	subsystem.max_nsid = 1;
1525 	subsystem.ns = &_ns;
1526 	subsystem.ns[0] = &ns;
1527 
1528 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1529 	CU_ASSERT(ret == false);
1530 
1531 	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
1532 
1533 	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
1534 	CU_ASSERT(ret == true);
1535 }
1536 
1537 static void
1538 test_identify_ctrlr(void)
1539 {
1540 	struct spdk_nvmf_tgt tgt = {};
1541 	struct spdk_nvmf_subsystem subsystem = {
1542 		.subtype = SPDK_NVMF_SUBTYPE_NVME,
1543 		.tgt = &tgt,
1544 	};
1545 	struct spdk_nvmf_transport_ops tops = {};
1546 	struct spdk_nvmf_transport transport = {
1547 		.ops = &tops,
1548 		.opts = {
1549 			.in_capsule_data_size = 4096,
1550 		},
1551 	};
1552 	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
1553 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
1554 	struct spdk_nvme_ctrlr_data cdata = {};
1555 	uint32_t expected_ioccsz;
1556 
1557 	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
1558 
1559 	/* Check ioccsz, TCP transport */
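	/*
	 * ioccsz is reported in 16-byte units: the 64-byte SQE contributes 4 and
	 * the 4096-byte in-capsule data size contributes 256, i.e. 260 in total.
	 */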
1560 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1561 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1562 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1563 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1564 
1565 	/* Check ioccsz, RDMA transport */
1566 	tops.type = SPDK_NVME_TRANSPORT_RDMA;
1567 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1568 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1569 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1570 
1571 	/* Check ioccsz, TCP transport with dif_insert_or_strip */
1572 	tops.type = SPDK_NVME_TRANSPORT_TCP;
1573 	ctrlr.dif_insert_or_strip = true;
1574 	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
1575 	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1576 	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
1577 }
1578 
1579 static int
1580 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
1581 {
1582 	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
1583 
1584 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1585 }
1586 
1587 static void
1588 test_custom_admin_cmd(void)
1589 {
1590 	struct spdk_nvmf_subsystem subsystem;
1591 	struct spdk_nvmf_qpair qpair;
1592 	struct spdk_nvmf_ctrlr ctrlr;
1593 	struct spdk_nvmf_request req;
1594 	struct spdk_nvmf_ns *ns_ptrs[1];
1595 	struct spdk_nvmf_ns ns;
1596 	union nvmf_h2c_msg cmd;
1597 	union nvmf_c2h_msg rsp;
1598 	struct spdk_bdev bdev;
1599 	uint8_t buf[4096];
1600 	int rc;
1601 
1602 	memset(&subsystem, 0, sizeof(subsystem));
1603 	ns_ptrs[0] = &ns;
1604 	subsystem.ns = ns_ptrs;
1605 	subsystem.max_nsid = 1;
1606 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1607 
1608 	memset(&ns, 0, sizeof(ns));
1609 	ns.opts.nsid = 1;
1610 	ns.bdev = &bdev;
1611 
1612 	memset(&qpair, 0, sizeof(qpair));
1613 	qpair.ctrlr = &ctrlr;
1614 
1615 	memset(&ctrlr, 0, sizeof(ctrlr));
1616 	ctrlr.subsys = &subsystem;
1617 	ctrlr.vcprop.cc.bits.en = 1;
1618 	ctrlr.thread = spdk_get_thread();
1619 
1620 	memset(&req, 0, sizeof(req));
1621 	req.qpair = &qpair;
1622 	req.cmd = &cmd;
1623 	req.rsp = &rsp;
1624 	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
1625 	req.data = buf;
1626 	req.length = sizeof(buf);
1627 
1628 	memset(&cmd, 0, sizeof(cmd));
1629 	cmd.nvme_cmd.opc = 0xc1;
1630 	cmd.nvme_cmd.nsid = 0;
1631 	memset(&rsp, 0, sizeof(rsp));
1632 
1633 	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
1634 
1635 	/* Ensure that our custom handler is called */
1636 	rc = nvmf_ctrlr_process_admin_cmd(&req);
1637 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1638 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1639 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
1640 }
1641 
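/*
 * Fused compare-and-write: the COMPARE must be submitted as FUSE_FIRST and the
 * WRITE as FUSE_SECOND, back to back on the same queue; any other opcode/fuse
 * combination or ordering must fail.
 */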
1642 static void
1643 test_fused_compare_and_write(void)
1644 {
1645 	struct spdk_nvmf_request req = {};
1646 	struct spdk_nvmf_qpair qpair = {};
1647 	struct spdk_nvme_cmd cmd = {};
1648 	union nvmf_c2h_msg rsp = {};
1649 	struct spdk_nvmf_ctrlr ctrlr = {};
1650 	struct spdk_nvmf_subsystem subsystem = {};
1651 	struct spdk_nvmf_ns ns = {};
1652 	struct spdk_nvmf_ns *subsys_ns[1] = {};
1653 	enum spdk_nvme_ana_state ana_state[1];
1654 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1655 	struct spdk_bdev bdev = {};
1656 
1657 	struct spdk_nvmf_poll_group group = {};
1658 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1659 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
1660 	struct spdk_io_channel io_ch = {};
1661 
1662 	ns.bdev = &bdev;
1663 	ns.anagrpid = 1;
1664 
1665 	subsystem.id = 0;
1666 	subsystem.max_nsid = 1;
1667 	subsys_ns[0] = &ns;
1668 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
1669 
1670 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1671 
1672 	/* Enable controller */
1673 	ctrlr.vcprop.cc.bits.en = 1;
1674 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
1675 	ctrlr.listener = &listener;
1676 
1677 	group.num_sgroups = 1;
1678 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1679 	sgroups.num_ns = 1;
1680 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1681 	ns_info.channel = &io_ch;
1682 	sgroups.ns_info = &ns_info;
1683 	TAILQ_INIT(&sgroups.queued);
1684 	group.sgroups = &sgroups;
1685 	TAILQ_INIT(&qpair.outstanding);
1686 
1687 	qpair.ctrlr = &ctrlr;
1688 	qpair.group = &group;
1689 	qpair.qid = 1;
1690 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
1691 
1692 	cmd.nsid = 1;
1693 
1694 	req.qpair = &qpair;
1695 	req.cmd = (union nvmf_h2c_msg *)&cmd;
1696 	req.rsp = &rsp;
1697 
1698 	/* SUCCESS/SUCCESS */
1699 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1700 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1701 
1702 	spdk_nvmf_request_exec(&req);
1703 	CU_ASSERT(qpair.first_fused_req != NULL);
1704 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1705 
1706 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1707 	cmd.opc = SPDK_NVME_OPC_WRITE;
1708 
1709 	spdk_nvmf_request_exec(&req);
1710 	CU_ASSERT(qpair.first_fused_req == NULL);
1711 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1712 
1713 	/* Wrong sequence */
1714 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1715 	cmd.opc = SPDK_NVME_OPC_WRITE;
1716 
1717 	spdk_nvmf_request_exec(&req);
1718 	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
1719 	CU_ASSERT(qpair.first_fused_req == NULL);
1720 
1721 	/* Write as FUSE_FIRST (Wrong op code) */
1722 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1723 	cmd.opc = SPDK_NVME_OPC_WRITE;
1724 
1725 	spdk_nvmf_request_exec(&req);
1726 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1727 	CU_ASSERT(qpair.first_fused_req == NULL);
1728 
1729 	/* Valid FUSE_FIRST compare followed by a COMPARE as FUSE_SECOND (wrong opcode) */
1730 	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
1731 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1732 
1733 	spdk_nvmf_request_exec(&req);
1734 	CU_ASSERT(qpair.first_fused_req != NULL);
1735 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
1736 
1737 	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
1738 	cmd.opc = SPDK_NVME_OPC_COMPARE;
1739 
1740 	spdk_nvmf_request_exec(&req);
1741 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1742 	CU_ASSERT(qpair.first_fused_req == NULL);
1743 }
1744 
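/*
 * A controller accepts up to NVMF_MAX_ASYNC_EVENTS outstanding AER commands;
 * one more is completed with ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, and aborting
 * an AER must leave the aer_req array contiguous.
 */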
1745 static void
1746 test_multi_async_event_reqs(void)
1747 {
1748 	struct spdk_nvmf_subsystem subsystem = {};
1749 	struct spdk_nvmf_qpair qpair = {};
1750 	struct spdk_nvmf_ctrlr ctrlr = {};
1751 	struct spdk_nvmf_request req[5] = {};
1752 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
1753 	struct spdk_nvmf_ns ns = {};
1754 	union nvmf_h2c_msg cmd[5] = {};
1755 	union nvmf_c2h_msg rsp[5] = {};
1756 
1757 	struct spdk_nvmf_poll_group group = {};
1758 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
1759 
1760 	int i;
1761 
1762 	ns_ptrs[0] = &ns;
1763 	subsystem.ns = ns_ptrs;
1764 	subsystem.max_nsid = 1;
1765 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
1766 
1767 	ns.opts.nsid = 1;
1768 	group.sgroups = &sgroups;
1769 
1770 	qpair.ctrlr = &ctrlr;
1771 	qpair.group = &group;
1772 	TAILQ_INIT(&qpair.outstanding);
1773 
1774 	ctrlr.subsys = &subsystem;
1775 	ctrlr.vcprop.cc.bits.en = 1;
1776 	ctrlr.thread = spdk_get_thread();
1777 
1778 	for (i = 0; i < 5; i++) {
1779 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
1780 		cmd[i].nvme_cmd.nsid = 1;
1781 		cmd[i].nvme_cmd.cid = i;
1782 
1783 		req[i].qpair = &qpair;
1784 		req[i].cmd = &cmd[i];
1785 		req[i].rsp = &rsp[i];
1786 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
1787 	}
1788 
1789 	/* Target can store NVMF_MAX_ASYNC_EVENTS reqs */
1790 	sgroups.mgmt_io_outstanding = NVMF_MAX_ASYNC_EVENTS;
1791 	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
1792 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
1793 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
1794 	}
1795 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
1796 
1797 	/* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
1798 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
1799 	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
1800 	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1801 	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
1802 
1803 	/* Test that the aer_req array stays contiguous when a request in the middle is aborted */
1804 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
1805 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1806 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1807 	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
1808 
1809 	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
1810 	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
1811 	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
1812 	CU_ASSERT(ctrlr.aer_req[2] == NULL);
1813 	CU_ASSERT(ctrlr.nr_aer_reqs == 2);
1814 
1815 	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
1816 	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
1817 }
1818 
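/*
 * ANA log page layout: an spdk_nvme_ana_page header followed by one
 * spdk_nvme_ana_group_descriptor per ANA group, each trailed by its
 * variable-length list of 32-bit NSIDs. The expected page is built by hand
 * and the actual page is read back in small offset/length chunks as well as
 * via a split iovec.
 */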
1819 static void
1820 test_get_ana_log_page_one_ns_per_anagrp(void)
1821 {
1822 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
1823 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
1824 	uint32_t ana_group[3];
1825 	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
1826 	struct spdk_nvmf_ctrlr ctrlr = {};
1827 	enum spdk_nvme_ana_state ana_state[3];
1828 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
1829 	struct spdk_nvmf_ns ns[3];
1830 	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
1831 	uint64_t offset;
1832 	uint32_t length;
1833 	int i;
1834 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1835 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1836 	struct iovec iov, iovs[2];
1837 	struct spdk_nvme_ana_page *ana_hdr;
1838 	char _ana_desc[UT_ANA_DESC_SIZE];
1839 	struct spdk_nvme_ana_group_descriptor *ana_desc;
1840 
1841 	subsystem.ns = ns_arr;
1842 	subsystem.max_nsid = 3;
1843 	for (i = 0; i < 3; i++) {
1844 		subsystem.ana_group[i] = 1;
1845 	}
1846 	ctrlr.subsys = &subsystem;
1847 	ctrlr.listener = &listener;
1848 
1849 	for (i = 0; i < 3; i++) {
1850 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1851 	}
1852 
1853 	for (i = 0; i < 3; i++) {
1854 		ns_arr[i]->nsid = i + 1;
1855 		ns_arr[i]->anagrpid = i + 1;
1856 	}
1857 
1858 	/* create expected page */
1859 	ana_hdr = (void *)&expected_page[0];
1860 	ana_hdr->num_ana_group_desc = 3;
1861 	ana_hdr->change_count = 0;
1862 
1863 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
1864 	ana_desc = (void *)_ana_desc;
1865 	offset = sizeof(struct spdk_nvme_ana_page);
1866 
1867 	for (i = 0; i < 3; i++) {
1868 		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
1869 		ana_desc->ana_group_id = ns_arr[i]->nsid;
1870 		ana_desc->num_of_nsid = 1;
1871 		ana_desc->change_count = 0;
1872 		ana_desc->ana_state = ctrlr.listener->ana_state[i];
1873 		ana_desc->nsid[0] = ns_arr[i]->nsid;
1874 		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
1875 		offset += UT_ANA_DESC_SIZE;
1876 	}
1877 
1878 	/* read entire actual log page */
1879 	offset = 0;
1880 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
1881 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
1882 		iov.iov_base = &actual_page[offset];
1883 		iov.iov_len = length;
1884 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
1885 		offset += length;
1886 	}
1887 
1888 	/* compare expected page and actual page */
1889 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1890 
1891 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
1892 	offset = 0;
1893 	iovs[0].iov_base = &actual_page[offset];
1894 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
1895 	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
1896 	iovs[1].iov_base = &actual_page[offset];
1897 	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
1898 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
1899 
1900 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1901 
1902 #undef UT_ANA_DESC_SIZE
1903 #undef UT_ANA_LOG_PAGE_SIZE
1904 }
1905 
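/*
 * Same ANA log page check, but with the namespaces spread across two ANA
 * groups: group 2 holds NSIDs 1, 3 and 5, group 3 holds NSIDs 2 and 4.
 */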
1906 static void
1907 test_get_ana_log_page_multi_ns_per_anagrp(void)
1908 {
1909 #define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
1910 				 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
1911 				 sizeof(uint32_t) * 5)
1912 	struct spdk_nvmf_ns ns[5];
1913 	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
1914 	uint32_t ana_group[5] = {0};
1915 	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
1916 	enum spdk_nvme_ana_state ana_state[5];
1917 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
1918 	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
1919 	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1920 	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
1921 	struct iovec iov, iovs[2];
1922 	struct spdk_nvme_ana_page *ana_hdr;
1923 	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
1924 	struct spdk_nvme_ana_group_descriptor *ana_desc;
1925 	uint64_t offset;
1926 	uint32_t length;
1927 	int i;
1928 
1929 	subsystem.max_nsid = 5;
1930 	subsystem.ana_group[1] = 3;
1931 	subsystem.ana_group[2] = 2;
1932 	for (i = 0; i < 5; i++) {
1933 		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
1934 	}
1935 
1936 	for (i = 0; i < 5; i++) {
1937 		ns_arr[i]->nsid = i + 1;
1938 	}
1939 	ns_arr[0]->anagrpid = 2;
1940 	ns_arr[1]->anagrpid = 3;
1941 	ns_arr[2]->anagrpid = 2;
1942 	ns_arr[3]->anagrpid = 3;
1943 	ns_arr[4]->anagrpid = 2;
1944 
1945 	/* create expected page */
1946 	ana_hdr = (void *)&expected_page[0];
1947 	ana_hdr->num_ana_group_desc = 2;
1948 	ana_hdr->change_count = 0;
1949 
1950 	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
1951 	ana_desc = (void *)_ana_desc;
1952 	offset = sizeof(struct spdk_nvme_ana_page);
1953 
1954 	memset(_ana_desc, 0, sizeof(_ana_desc));
1955 	ana_desc->ana_group_id = 2;
1956 	ana_desc->num_of_nsid = 3;
1957 	ana_desc->change_count = 0;
1958 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1959 	ana_desc->nsid[0] = 1;
1960 	ana_desc->nsid[1] = 3;
1961 	ana_desc->nsid[2] = 5;
1962 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
1963 	       sizeof(uint32_t) * 3);
1964 	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;
1965 
1966 	memset(_ana_desc, 0, sizeof(_ana_desc));
1967 	ana_desc->ana_group_id = 3;
1968 	ana_desc->num_of_nsid = 2;
1969 	ana_desc->change_count = 0;
1970 	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
1971 	ana_desc->nsid[0] = 2;
1972 	ana_desc->nsid[1] = 4;
1973 	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
1974 	       sizeof(uint32_t) * 2);
1975 
1976 	/* read entire actual log page, and compare expected page and actual page. */
1977 	offset = 0;
1978 	while (offset < UT_ANA_LOG_PAGE_SIZE) {
1979 		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
1980 		iov.iov_base = &actual_page[offset];
1981 		iov.iov_len = length;
1982 		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
1983 		offset += length;
1984 	}
1985 
1986 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1987 
1988 	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
1989 	offset = 0;
1990 	iovs[0].iov_base = &actual_page[offset];
1991 	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
1992 	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
1993 	iovs[1].iov_base = &actual_page[offset];
1994 	iovs[1].iov_len = sizeof(uint32_t) * 5;
1995 	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);
1996 
1997 	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
1998 
1999 #undef UT_ANA_LOG_PAGE_SIZE
2000 }

2001 static void
2002 test_multi_async_events(void)
2003 {
2004 	struct spdk_nvmf_subsystem subsystem = {};
2005 	struct spdk_nvmf_qpair qpair = {};
2006 	struct spdk_nvmf_ctrlr ctrlr = {};
2007 	struct spdk_nvmf_request req[4] = {};
2008 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2009 	struct spdk_nvmf_ns ns = {};
2010 	union nvmf_h2c_msg cmd[4] = {};
2011 	union nvmf_c2h_msg rsp[4] = {};
2012 	union spdk_nvme_async_event_completion event = {};
2013 	struct spdk_nvmf_poll_group group = {};
2014 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2015 	int i;
2016 
2017 	ns_ptrs[0] = &ns;
2018 	subsystem.ns = ns_ptrs;
2019 	subsystem.max_nsid = 1;
2020 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2021 
2022 	ns.opts.nsid = 1;
2023 	group.sgroups = &sgroups;
2024 
2025 	qpair.ctrlr = &ctrlr;
2026 	qpair.group = &group;
2027 	TAILQ_INIT(&qpair.outstanding);
2028 
2029 	ctrlr.subsys = &subsystem;
2030 	ctrlr.vcprop.cc.bits.en = 1;
2031 	ctrlr.thread = spdk_get_thread();
2032 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2033 	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
2034 	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
2035 	init_pending_async_events(&ctrlr);
2036 
2037 	/* The target queues pending events when there is no outstanding AER request */
2038 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2039 	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
2040 	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
2041 
2042 	for (i = 0; i < 4; i++) {
2043 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2044 		cmd[i].nvme_cmd.nsid = 1;
2045 		cmd[i].nvme_cmd.cid = i;
2046 
2047 		req[i].qpair = &qpair;
2048 		req[i].cmd = &cmd[i];
2049 		req[i].rsp = &rsp[i];
2050 
2051 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
2052 
2053 		sgroups.mgmt_io_outstanding = 1;
2054 		if (i < 3) {
2055 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2056 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2057 			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
2058 		} else {
2059 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
2060 			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
2061 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
2062 		}
2063 	}
2064 
2065 	event.raw = rsp[0].nvme_cpl.cdw0;
2066 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2067 	event.raw = rsp[1].nvme_cpl.cdw0;
2068 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
2069 	event.raw = rsp[2].nvme_cpl.cdw0;
2070 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
2071 
2072 	cleanup_pending_async_events(&ctrlr);
2073 }
2074 
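/*
 * RAE (Retain Asynchronous Event): while a reported event is outstanding,
 * duplicate notices are not queued. Get Log Page with RAE=1 leaves the event
 * condition asserted; RAE=0 clears it, so the next notice is queued again.
 */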
2075 static void
2076 test_rae(void)
2077 {
2078 	struct spdk_nvmf_subsystem subsystem = {};
2079 	struct spdk_nvmf_qpair qpair = {};
2080 	struct spdk_nvmf_ctrlr ctrlr = {};
2081 	struct spdk_nvmf_request req[3] = {};
2082 	struct spdk_nvmf_ns *ns_ptrs[1] = {};
2083 	struct spdk_nvmf_ns ns = {};
2084 	union nvmf_h2c_msg cmd[3] = {};
2085 	union nvmf_c2h_msg rsp[3] = {};
2086 	union spdk_nvme_async_event_completion event = {};
2087 	struct spdk_nvmf_poll_group group = {};
2088 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2089 	int i;
2090 	char data[4096];
2091 
2092 	ns_ptrs[0] = &ns;
2093 	subsystem.ns = ns_ptrs;
2094 	subsystem.max_nsid = 1;
2095 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2096 
2097 	ns.opts.nsid = 1;
2098 	group.sgroups = &sgroups;
2099 
2100 	qpair.ctrlr = &ctrlr;
2101 	qpair.group = &group;
2102 	TAILQ_INIT(&qpair.outstanding);
2103 
2104 	ctrlr.subsys = &subsystem;
2105 	ctrlr.vcprop.cc.bits.en = 1;
2106 	ctrlr.thread = spdk_get_thread();
2107 	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
2108 	init_pending_async_events(&ctrlr);
2109 
2110 	/* The target queues pending events when there is no outstanding AER request */
2111 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2112 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2113 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2114 	/* Only one event is queued until RAE is cleared */
2115 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2116 
2117 	req[0].qpair = &qpair;
2118 	req[0].cmd = &cmd[0];
2119 	req[0].rsp = &rsp[0];
2120 	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
2121 	cmd[0].nvme_cmd.nsid = 1;
2122 	cmd[0].nvme_cmd.cid = 0;
2123 
2124 	for (i = 1; i < 3; i++) {
2125 		req[i].qpair = &qpair;
2126 		req[i].cmd = &cmd[i];
2127 		req[i].rsp = &rsp[i];
2128 		req[i].data = &data;
2129 		req[i].length = sizeof(data);
2130 
2131 		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
2132 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
2133 			SPDK_NVME_LOG_CHANGED_NS_LIST;
2134 		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
2135 			spdk_nvme_bytes_to_numd(req[i].length);
2136 		cmd[i].nvme_cmd.cid = i;
2137 	}
2138 	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
2139 	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;
2140 
2141 	/* consume the pending event */
2142 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
2143 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2144 	event.raw = rsp[0].nvme_cpl.cdw0;
2145 	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
2146 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2147 
2148 	/* get log with RAE set */
2149 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2150 	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2151 	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2152 
2153 	/* No new event is generated until RAE is cleared */
2154 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2155 	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);
2156 
2157 	/* get log with RAE clear */
2158 	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2159 	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2160 	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2161 
2162 	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
2163 	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);
2164 
2165 	cleanup_pending_async_events(&ctrlr);
2166 }
2167 
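/*
 * nvmf_ctrlr_create() services a Fabrics CONNECT on the admin queue and must
 * populate the default property (CAP, VS, CC, CSTS) and feature values
 * asserted below; nvmf_ctrlr_destruct() detaches the controller from its
 * subsystem and completes the outstanding request.
 */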
2168 static void
2169 test_nvmf_ctrlr_create_destruct(void)
2170 {
2171 	struct spdk_nvmf_fabric_connect_data connect_data = {};
2172 	struct spdk_nvmf_poll_group group = {};
2173 	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
2174 	struct spdk_nvmf_transport transport = {};
2175 	struct spdk_nvmf_transport_ops tops = {};
2176 	struct spdk_nvmf_subsystem subsystem = {};
2177 	struct spdk_nvmf_request req = {};
2178 	struct spdk_nvmf_qpair qpair = {};
2179 	struct spdk_nvmf_ctrlr *ctrlr = NULL;
2180 	struct spdk_nvmf_tgt tgt = {};
2181 	union nvmf_h2c_msg cmd = {};
2182 	union nvmf_c2h_msg rsp = {};
2183 	const uint8_t hostid[16] = {
2184 		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2185 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
2186 	};
2187 	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
2188 	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
2189 
2190 	group.thread = spdk_get_thread();
2191 	transport.ops = &tops;
2192 	transport.opts.max_aq_depth = 32;
2193 	transport.opts.max_queue_depth = 64;
2194 	transport.opts.max_qpairs_per_ctrlr = 3;
2195 	transport.opts.dif_insert_or_strip = true;
2196 	transport.tgt = &tgt;
2197 	qpair.transport = &transport;
2198 	qpair.group = &group;
2199 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2200 	TAILQ_INIT(&qpair.outstanding);
2201 
2202 	memcpy(connect_data.hostid, hostid, sizeof(hostid));
2203 	connect_data.cntlid = 0xFFFF;
2204 	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
2205 	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
2206 
2207 	subsystem.thread = spdk_get_thread();
2208 	subsystem.id = 1;
2209 	TAILQ_INIT(&subsystem.ctrlrs);
2210 	subsystem.tgt = &tgt;
2211 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2212 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2213 	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
2214 
2215 	group.sgroups = sgroups;
2216 
2217 	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
2218 	cmd.connect_cmd.cid = 1;
2219 	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
2220 	cmd.connect_cmd.recfmt = 0;
2221 	cmd.connect_cmd.qid = 0;
2222 	cmd.connect_cmd.sqsize = 31;
2223 	cmd.connect_cmd.cattr = 0;
2224 	cmd.connect_cmd.kato = 120000;
2225 
2226 	req.qpair = &qpair;
2227 	req.length = sizeof(connect_data);
2228 	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
2229 	req.data = &connect_data;
2230 	req.cmd = &cmd;
2231 	req.rsp = &rsp;
2232 
2233 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
2234 	sgroups[subsystem.id].mgmt_io_outstanding++;
2235 
2236 	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.data);
2237 	poll_threads();
2238 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
2239 	CU_ASSERT(req.qpair->ctrlr == ctrlr);
2240 	CU_ASSERT(ctrlr->subsys == &subsystem);
2241 	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
2242 	CU_ASSERT(ctrlr->disconnect_in_progress == false);
2243 	CU_ASSERT(ctrlr->qpair_mask != NULL);
2244 	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
2245 	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
2246 	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
2247 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
2248 	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
2249 	CU_ASSERT(!memcmp((void *)&ctrlr->hostid, hostid, 16)); /* hostid is binary, not a string */
2250 	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
2251 	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
2252 	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
2253 	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
2254 	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
2255 	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
2256 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
2257 	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
2258 	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
2259 	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
2260 	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
2261 	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
2262 	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
2263 	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
2264 	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
2265 	CU_ASSERT(ctrlr->dif_insert_or_strip == true);
2266 
2267 	ctrlr->in_destruct = true;
2268 	nvmf_ctrlr_destruct(ctrlr);
2269 	poll_threads();
2270 	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
2271 	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
2272 }
2273 
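/*
 * nvmf_ctrlr_use_zcopy() opts in only for non-fused READ/WRITE commands on an
 * I/O queue whose NSID maps to a zcopy-capable bdev, with zcopy enabled at the
 * transport level; everything else stays on the regular copy path.
 */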
2274 static void
2275 test_nvmf_ctrlr_use_zcopy(void)
2276 {
2277 	struct spdk_nvmf_subsystem subsystem = {};
2278 	struct spdk_nvmf_transport transport = {};
2279 	struct spdk_nvmf_request req = {};
2280 	struct spdk_nvmf_qpair qpair = {};
2281 	struct spdk_nvmf_ctrlr ctrlr = {};
2282 	union nvmf_h2c_msg cmd = {};
2283 	struct spdk_nvmf_ns ns = {};
2284 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2285 	struct spdk_bdev bdev = {};
2286 	struct spdk_nvmf_poll_group group = {};
2287 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2288 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2289 	struct spdk_io_channel io_ch = {};
2290 	int opc;
2291 
2292 	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
2293 	ns.bdev = &bdev;
2294 
2295 	subsystem.id = 0;
2296 	subsystem.max_nsid = 1;
2297 	subsys_ns[0] = &ns;
2298 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2299 
2300 	ctrlr.subsys = &subsystem;
2301 
2302 	transport.opts.zcopy = true;
2303 
2304 	qpair.ctrlr = &ctrlr;
2305 	qpair.group = &group;
2306 	qpair.qid = 1;
2307 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2308 	qpair.transport = &transport;
2309 
2310 	group.thread = spdk_get_thread();
2311 	group.num_sgroups = 1;
2312 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2313 	sgroups.num_ns = 1;
2314 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2315 	ns_info.channel = &io_ch;
2316 	sgroups.ns_info = &ns_info;
2317 	TAILQ_INIT(&sgroups.queued);
2318 	group.sgroups = &sgroups;
2319 	TAILQ_INIT(&qpair.outstanding);
2320 
2321 	req.qpair = &qpair;
2322 	req.cmd = &cmd;
2323 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2324 
2325 	/* Admin queue */
2326 	qpair.qid = 0;
2327 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2328 	qpair.qid = 1;
2329 
2330 	/* Invalid Opcodes */
2331 	for (opc = 0; opc <= 255; opc++) {
2332 		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
2333 		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
2334 		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
2335 			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2336 		}
2337 	}
2338 	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
2339 
2340 	/* Fused WRITE */
2341 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
2342 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2343 	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;
2344 
2345 	/* NSID does not map to a namespace */
2346 	cmd.nvme_cmd.nsid = 4;
2347 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2348 	cmd.nvme_cmd.nsid = 1;
2349 
2350 	/* ZCOPY not supported by the namespace */
2351 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2352 	ns.zcopy = true;
2353 
2354 	/* ZCOPY disabled on transport level */
2355 	transport.opts.zcopy = false;
2356 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
2357 	transport.opts.zcopy = true;
2358 
2359 	/* Success */
2360 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2361 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2362 }
2363 
2364 static void
2365 qpair_state_change_done(void *cb_arg, int status)
2366 {
2367 }
2368 
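/*
 * Zcopy phases: nvmf_ctrlr_use_zcopy() moves a request from
 * NVMF_ZCOPY_PHASE_NONE to NVMF_ZCOPY_PHASE_INIT, and
 * spdk_nvmf_request_zcopy_start() then either advances it to
 * NVMF_ZCOPY_PHASE_EXECUTE, parks it on the subsystem poll group's queued
 * list while the namespace is paused, or fails it into
 * NVMF_ZCOPY_PHASE_INIT_FAILED.
 */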
2369 static void
2370 test_spdk_nvmf_request_zcopy_start(void)
2371 {
2372 	struct spdk_nvmf_request req = {};
2373 	struct spdk_nvmf_qpair qpair = {};
2374 	struct spdk_nvmf_transport transport = {};
2375 	struct spdk_nvme_cmd cmd = {};
2376 	union nvmf_c2h_msg rsp = {};
2377 	struct spdk_nvmf_ctrlr ctrlr = {};
2378 	struct spdk_nvmf_subsystem subsystem = {};
2379 	struct spdk_nvmf_ns ns = {};
2380 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2381 	enum spdk_nvme_ana_state ana_state[1];
2382 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2383 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2384 
2385 	struct spdk_nvmf_poll_group group = {};
2386 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2387 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2388 	struct spdk_io_channel io_ch = {};
2389 
2390 	ns.bdev = &bdev;
2391 	ns.zcopy = true;
2392 	ns.anagrpid = 1;
2393 
2394 	subsystem.id = 0;
2395 	subsystem.max_nsid = 1;
2396 	subsys_ns[0] = &ns;
2397 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2398 
2399 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2400 
2401 	/* Enable controller */
2402 	ctrlr.vcprop.cc.bits.en = 1;
2403 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2404 	ctrlr.listener = &listener;
2405 
2406 	transport.opts.zcopy = true;
2407 
2408 	group.thread = spdk_get_thread();
2409 	group.num_sgroups = 1;
2410 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2411 	sgroups.num_ns = 1;
2412 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2413 	ns_info.channel = &io_ch;
2414 	sgroups.ns_info = &ns_info;
2415 	TAILQ_INIT(&sgroups.queued);
2416 	group.sgroups = &sgroups;
2417 	TAILQ_INIT(&qpair.outstanding);
2418 
2419 	qpair.ctrlr = &ctrlr;
2420 	qpair.group = &group;
2421 	qpair.transport = &transport;
2422 	qpair.qid = 1;
2423 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2424 
2425 	cmd.nsid = 1;
2426 
2427 	req.qpair = &qpair;
2428 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2429 	req.rsp = &rsp;
2430 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2431 	cmd.opc = SPDK_NVME_OPC_READ;
2432 
2433 	/* Fail because no controller */
2434 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2435 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2436 	qpair.ctrlr = NULL;
2437 	spdk_nvmf_request_zcopy_start(&req);
2438 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2439 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2440 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
2441 	qpair.ctrlr = &ctrlr;
2442 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2443 
2444 	/* Fail because bad NSID */
2445 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2446 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2447 	cmd.nsid = 0;
2448 	spdk_nvmf_request_zcopy_start(&req);
2449 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2450 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2451 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2452 	cmd.nsid = 1;
2453 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2454 
2455 	/* Fail because bad Channel */
2456 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2457 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2458 	ns_info.channel = NULL;
2459 	spdk_nvmf_request_zcopy_start(&req);
2460 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2461 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
2462 	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2463 	ns_info.channel = &io_ch;
2464 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2465 
2466 	/* Queue the request because the NSID is not active */
2467 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2468 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2469 	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
2470 	spdk_nvmf_request_zcopy_start(&req);
2471 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
2472 	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
2473 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2474 	TAILQ_REMOVE(&sgroups.queued, &req, link);
2475 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2476 
2477 	/* Fail because QPair is not active */
2478 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2479 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2480 	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
2481 	qpair.state_cb = qpair_state_change_done;
2482 	spdk_nvmf_request_zcopy_start(&req);
2483 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
2484 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2485 	qpair.state_cb = NULL;
2486 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2487 
2488 	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails */
2489 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2490 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2491 	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
2492 	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
2493 	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
2494 	spdk_nvmf_request_zcopy_start(&req);
2495 	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
2496 	cmd.cdw10 = 0;
2497 	cmd.cdw12 = 0;
2498 	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
2499 
2500 	/* Success */
2501 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2502 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2503 	spdk_nvmf_request_zcopy_start(&req);
2504 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2505 }
2506 
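/*
 * A successful zcopy READ: start pins the request on the qpair's outstanding
 * list and bumps the namespace's outstanding I/O count;
 * spdk_nvmf_request_zcopy_end() releases both and clears zcopy_bdev_io.
 */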
2507 static void
2508 test_zcopy_read(void)
2509 {
2510 	struct spdk_nvmf_request req = {};
2511 	struct spdk_nvmf_qpair qpair = {};
2512 	struct spdk_nvmf_transport transport = {};
2513 	struct spdk_nvme_cmd cmd = {};
2514 	union nvmf_c2h_msg rsp = {};
2515 	struct spdk_nvmf_ctrlr ctrlr = {};
2516 	struct spdk_nvmf_subsystem subsystem = {};
2517 	struct spdk_nvmf_ns ns = {};
2518 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2519 	enum spdk_nvme_ana_state ana_state[1];
2520 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2521 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2522 
2523 	struct spdk_nvmf_poll_group group = {};
2524 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2525 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2526 	struct spdk_io_channel io_ch = {};
2527 
2528 	ns.bdev = &bdev;
2529 	ns.zcopy = true;
2530 	ns.anagrpid = 1;
2531 
2532 	subsystem.id = 0;
2533 	subsystem.max_nsid = 1;
2534 	subsys_ns[0] = &ns;
2535 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2536 
2537 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2538 
2539 	/* Enable controller */
2540 	ctrlr.vcprop.cc.bits.en = 1;
2541 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2542 	ctrlr.listener = &listener;
2543 
2544 	transport.opts.zcopy = true;
2545 
2546 	group.thread = spdk_get_thread();
2547 	group.num_sgroups = 1;
2548 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2549 	sgroups.num_ns = 1;
2550 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2551 	ns_info.channel = &io_ch;
2552 	sgroups.ns_info = &ns_info;
2553 	TAILQ_INIT(&sgroups.queued);
2554 	group.sgroups = &sgroups;
2555 	TAILQ_INIT(&qpair.outstanding);
2556 
2557 	qpair.ctrlr = &ctrlr;
2558 	qpair.group = &group;
2559 	qpair.transport = &transport;
2560 	qpair.qid = 1;
2561 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2562 
2563 	cmd.nsid = 1;
2564 
2565 	req.qpair = &qpair;
2566 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2567 	req.rsp = &rsp;
2568 	cmd.opc = SPDK_NVME_OPC_READ;
2569 
2570 	/* Prepare for zcopy */
2571 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2572 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2573 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2574 	CU_ASSERT(ns_info.io_outstanding == 0);
2575 
2576 	/* Perform the zcopy start */
2577 	spdk_nvmf_request_zcopy_start(&req);
2578 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2579 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
2580 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2581 	CU_ASSERT(ns_info.io_outstanding == 1);
2582 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2583 
2584 	/* Perform the zcopy end */
2585 	spdk_nvmf_request_zcopy_end(&req, false);
2586 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2587 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2588 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2589 	CU_ASSERT(ns_info.io_outstanding == 0);
2590 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2591 }
2592 
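/* Same flow as test_zcopy_read, but for a WRITE that is committed on end. */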
2593 static void
2594 test_zcopy_write(void)
2595 {
2596 	struct spdk_nvmf_request req = {};
2597 	struct spdk_nvmf_qpair qpair = {};
2598 	struct spdk_nvmf_transport transport = {};
2599 	struct spdk_nvme_cmd cmd = {};
2600 	union nvmf_c2h_msg rsp = {};
2601 	struct spdk_nvmf_ctrlr ctrlr = {};
2602 	struct spdk_nvmf_subsystem subsystem = {};
2603 	struct spdk_nvmf_ns ns = {};
2604 	struct spdk_nvmf_ns *subsys_ns[1] = {};
2605 	enum spdk_nvme_ana_state ana_state[1];
2606 	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
2607 	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512};
2608 
2609 	struct spdk_nvmf_poll_group group = {};
2610 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
2611 	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
2612 	struct spdk_io_channel io_ch = {};
2613 
2614 	ns.bdev = &bdev;
2615 	ns.zcopy = true;
2616 	ns.anagrpid = 1;
2617 
2618 	subsystem.id = 0;
2619 	subsystem.max_nsid = 1;
2620 	subsys_ns[0] = &ns;
2621 	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
2622 
2623 	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
2624 
2625 	/* Enable controller */
2626 	ctrlr.vcprop.cc.bits.en = 1;
2627 	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
2628 	ctrlr.listener = &listener;
2629 
2630 	transport.opts.zcopy = true;
2631 
2632 	group.thread = spdk_get_thread();
2633 	group.num_sgroups = 1;
2634 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2635 	sgroups.num_ns = 1;
2636 	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
2637 	ns_info.channel = &io_ch;
2638 	sgroups.ns_info = &ns_info;
2639 	TAILQ_INIT(&sgroups.queued);
2640 	group.sgroups = &sgroups;
2641 	TAILQ_INIT(&qpair.outstanding);
2642 
2643 	qpair.ctrlr = &ctrlr;
2644 	qpair.group = &group;
2645 	qpair.transport = &transport;
2646 	qpair.qid = 1;
2647 	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
2648 
2649 	cmd.nsid = 1;
2650 
2651 	req.qpair = &qpair;
2652 	req.cmd = (union nvmf_h2c_msg *)&cmd;
2653 	req.rsp = &rsp;
2654 	cmd.opc = SPDK_NVME_OPC_WRITE;
2655 
2656 	/* Prepare for zcopy */
2657 	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
2658 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
2659 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2660 	CU_ASSERT(ns_info.io_outstanding == 0);
2661 
2662 	/* Perform the zcopy start */
2663 	spdk_nvmf_request_zcopy_start(&req);
2664 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
2665 	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
2666 	CU_ASSERT(qpair.outstanding.tqh_first == &req);
2667 	CU_ASSERT(ns_info.io_outstanding == 1);
2668 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2669 
2670 	/* Perform the zcopy end */
2671 	spdk_nvmf_request_zcopy_end(&req, true);
2672 	CU_ASSERT(req.zcopy_bdev_io == NULL);
2673 	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
2674 	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
2675 	CU_ASSERT(ns_info.io_outstanding == 0);
2676 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
2677 }
2678 
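/*
 * Fabrics Property Get/Set: attrib.size selects a 4- or 8-byte access.
 * Accesses without a matching handler (set on the read-only vs register, get
 * on intms) fail with SPDK_NVMF_FABRIC_SC_INVALID_PARAM, and a 4-byte access
 * to the 8-byte asq register covers only its low 32 bits.
 */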
2679 static void
2680 test_nvmf_property_set(void)
2681 {
2682 	int rc;
2683 	struct spdk_nvmf_request req = {};
2684 	struct spdk_nvmf_qpair qpair = {};
2685 	struct spdk_nvmf_ctrlr ctrlr = {};
2686 	union nvmf_h2c_msg cmd = {};
2687 	union nvmf_c2h_msg rsp = {};
2688 
2689 	req.qpair = &qpair;
2690 	qpair.ctrlr = &ctrlr;
2691 	req.cmd = &cmd;
2692 	req.rsp = &rsp;
2693 
2694 	/* Invalid parameters */
2695 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2696 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);
2697 
2698 	rc = nvmf_property_set(&req);
2699 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2700 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2701 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2702 
2703 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);
2704 
2705 	rc = nvmf_property_get(&req);
2706 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2707 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
2708 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
2709 
2710 	/* Set cc, a 4-byte register, with a matching 4-byte property size */
2711 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2712 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);
2713 
2714 	rc = nvmf_property_set(&req);
2715 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2716 
2717 	/* Emulate cc data */
2718 	ctrlr.vcprop.cc.raw = 0xDEADBEEF;
2719 
2720 	rc = nvmf_property_get(&req);
2721 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2722 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);
2723 
2724 	/* Set asq, an 8-byte register, with a 4-byte property size */
2725 	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
2726 	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
2727 	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);
2728 
2729 	rc = nvmf_property_set(&req);
2730 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2731 
2732 	/* Emulate asq data */
2733 	ctrlr.vcprop.asq = 0xAADDADBEEF;
2734 
2735 	rc = nvmf_property_get(&req);
2736 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2737 	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
2738 }
2739 
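/*
 * Host Behavior Support feature: the acre (Advanced Command Retry Enable)
 * field must round-trip through Get/Set Features, the payload must be exactly
 * sizeof(struct spdk_nvme_host_behavior), and acre values other than 0 or 1
 * are rejected.
 */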
2740 static void
2741 test_nvmf_ctrlr_get_features_host_behavior_support(void)
2742 {
2743 	int rc;
2744 	struct spdk_nvmf_request req = {};
2745 	struct spdk_nvmf_qpair qpair = {};
2746 	struct spdk_nvmf_ctrlr ctrlr = {};
2747 	struct spdk_nvme_host_behavior *host_behavior;
2748 	struct spdk_nvme_host_behavior behavior = {};
2749 	union nvmf_h2c_msg cmd = {};
2750 	union nvmf_c2h_msg rsp = {};
2751 
2752 	qpair.ctrlr = &ctrlr;
2753 	req.qpair = &qpair;
2754 	req.cmd = &cmd;
2755 	req.rsp = &rsp;
2756 
2757 	/* Invalid data */
2758 	req.data = NULL;
2759 	req.length = sizeof(struct spdk_nvme_host_behavior);
2760 
2761 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2762 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2763 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2764 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2765 	CU_ASSERT(req.data == NULL);
2766 
2767 	/* Wrong structure length */
2768 	req.data = &behavior;
2769 	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;
2770 
2771 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2772 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2773 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2774 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2775 
2776 	/* Get Features Host Behavior Support Success */
2777 	req.data = &behavior;
2778 	req.length = sizeof(struct spdk_nvme_host_behavior);
2779 	ctrlr.acre_enabled = true;
2780 	host_behavior = (struct spdk_nvme_host_behavior *)req.data;
2781 	host_behavior->acre = false;
2782 
2783 	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
2784 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2785 	CU_ASSERT(host_behavior->acre == true);
2786 }
2787 
2788 static void
2789 test_nvmf_ctrlr_set_features_host_behavior_support(void)
2790 {
2791 	int rc;
2792 	struct spdk_nvmf_request req = {};
2793 	struct spdk_nvmf_qpair qpair = {};
2794 	struct spdk_nvmf_ctrlr ctrlr = {};
2795 	struct spdk_nvme_host_behavior host_behavior = {};
2796 	union nvmf_h2c_msg cmd = {};
2797 	union nvmf_c2h_msg rsp = {};
2798 
2799 	qpair.ctrlr = &ctrlr;
2800 	req.qpair = &qpair;
2801 	req.cmd = &cmd;
2802 	req.rsp = &rsp;
2803 	req.iov[0].iov_base = &host_behavior;
2804 	req.iov[0].iov_len = sizeof(host_behavior);
2805 
2806 	/* Invalid iovcnt */
2807 	req.iovcnt = 0;
2808 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
2809 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2810 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
2811 
2812 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
2813 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2814 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2815 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2816 
2817 	/* Invalid iov_len */
2818 	req.iovcnt = 1;
2819 	req.iov[0].iov_len = 0;
2820 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
2821 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2822 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
2823 
2824 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
2825 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2826 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2827 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2828 
2829 	/* acre is false */
2830 	host_behavior.acre = 0;
2831 	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
2832 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
2833 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2834 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
2835 
2836 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
2837 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2838 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2839 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2840 	CU_ASSERT(ctrlr.acre_enabled == false);
2841 
2842 	/* acre is true */
2843 	host_behavior.acre = 1;
2844 	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
2845 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
2846 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2847 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
2848 
2849 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
2850 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2851 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2852 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
2853 	CU_ASSERT(ctrlr.acre_enabled == true);
2854 
2855 	/* Invalid acre */
2856 	host_behavior.acre = 2;
2857 	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
2858 	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2859 	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
2860 
2861 	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
2862 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
2863 	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
2864 	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
2865 }
2866 
2867 int main(int argc, char **argv)
2868 {
2869 	CU_pSuite	suite = NULL;
2870 	unsigned int	num_failures;
2871 
2872 	CU_set_error_action(CUEA_ABORT);
2873 	CU_initialize_registry();
2874 
2875 	suite = CU_add_suite("nvmf", NULL, NULL);
2876 	CU_ADD_TEST(suite, test_get_log_page);
2877 	CU_ADD_TEST(suite, test_process_fabrics_cmd);
2878 	CU_ADD_TEST(suite, test_connect);
2879 	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
2880 	CU_ADD_TEST(suite, test_identify_ns);
2881 	CU_ADD_TEST(suite, test_reservation_write_exclusive);
2882 	CU_ADD_TEST(suite, test_reservation_exclusive_access);
2883 	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
2884 	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
2885 	CU_ADD_TEST(suite, test_reservation_notification_log_page);
2886 	CU_ADD_TEST(suite, test_get_dif_ctx);
2887 	CU_ADD_TEST(suite, test_set_get_features);
2888 	CU_ADD_TEST(suite, test_identify_ctrlr);
2889 	CU_ADD_TEST(suite, test_custom_admin_cmd);
2890 	CU_ADD_TEST(suite, test_fused_compare_and_write);
2891 	CU_ADD_TEST(suite, test_multi_async_event_reqs);
2892 	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
2893 	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
2894 	CU_ADD_TEST(suite, test_multi_async_events);
2895 	CU_ADD_TEST(suite, test_rae);
2896 	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
2897 	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
2898 	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
2899 	CU_ADD_TEST(suite, test_zcopy_read);
2900 	CU_ADD_TEST(suite, test_zcopy_write);
2901 	CU_ADD_TEST(suite, test_nvmf_property_set);
2902 	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
2903 	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);
2904 
2905 	allocate_threads(1);
2906 	set_thread(0);
2907 
2908 	CU_basic_set_mode(CU_BRM_VERBOSE);
2909 	CU_basic_run_tests();
2910 	num_failures = CU_get_number_of_failures();
2911 	CU_cleanup_registry();
2912 
2913 	free_threads();
2914 
2915 	return num_failures;
2916 }
2917