/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"

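/*
 * Compile the implementation under test directly into this translation unit
 * so that its static functions and data are reachable from the test cases below.
 */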
#include "ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

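/*
 * Stub out the library functions that ctrlr.c calls into.  DEFINE_STUB()
 * creates a mock with the given return type, argument list, and default
 * return value; individual tests override the return value with MOCK_SET()
 * where a specific subsystem, poll group, or controller is needed.
 */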
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL)

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL)

DEFINE_STUB_V(spdk_nvmf_poll_group_destroy,
	      (struct spdk_nvmf_poll_group *group))

DEFINE_STUB_V(spdk_nvmf_transport_qpair_fini,
	      (struct spdk_nvmf_qpair *qpair))

DEFINE_STUB(spdk_nvmf_poll_group_add,
	    int,
	    (struct spdk_nvmf_poll_group *group, struct spdk_nvmf_qpair *qpair),
	    0)

DEFINE_STUB(spdk_nvmf_poll_group_remove,
	    int,
	    (struct spdk_nvmf_poll_group *group, struct spdk_nvmf_qpair *qpair),
	    0)

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    NULL)

DEFINE_STUB(spdk_nvmf_subsystem_get_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, uint32_t nsid),
	    NULL)

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL)

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL)

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true)

DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0)

DEFINE_STUB_V(spdk_nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr))

DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL)

DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false)

DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false)

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_identify_ns,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_nvme_ns_data *nsdata),
	    -1)

DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, void *buffer, uint64_t offset, uint32_t length))

DEFINE_STUB(spdk_nvmf_request_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    -1)

DEFINE_STUB(spdk_nvmf_request_abort,
	    int,
	    (struct spdk_nvmf_request *req),
	    -1)

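/* Exercise spdk_nvmf_ctrlr_get_log_page() with one valid request and several malformed ones. */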
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);

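	/*
	 * For Get Log Page, the low byte of CDW10 carries the log page identifier
	 * and the upper 16 bits carry the 0-based number of dwords to transfer,
	 * hence the (req.length / 4 - 1) << 16 encoding used below.
	 */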
	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

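/* Drive spdk_nvmf_ctrlr_process_fabrics_cmd() on a qpair that has no controller yet. */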
static void
test_process_fabrics_cmd(void)
{
	struct	spdk_nvmf_request req = {};
	int	ret;
	struct	spdk_nvmf_qpair req_qpair = {};
	union	nvmf_h2c_msg  req_cmd = {};
	union	nvmf_c2h_msg   req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd  = &req_cmd;
	req.rsp  = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* Fabrics command other than Connect with no controller established - expect a command sequence error */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = spdk_nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

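/*
 * Walk spdk_nvmf_ctrlr_connect() through a successful admin queue connect, a
 * successful I/O queue connect, and the individual validation failures in between.
 */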
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));

	memset(&ctrlr, 0, sizeof(ctrlr));
	TAILQ_INIT(&ctrlr.qpairs);
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;
	ctrlr.max_qpairs_allowed = 3;

	memset(&tgt, 0, sizeof(tgt));
	tgt.opts.max_queue_depth = 64;
	tgt.opts.max_qpairs_per_ctrlr = 3;

	memset(&transport, 0, sizeof(transport));
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	strncpy(connect_data.subnqn, subnqn, sizeof(connect_data.subnqn));
	strncpy(connect_data.hostnqn, hostnqn, sizeof(connect_data.hostnqn));

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, struct spdk_nvmf_subsystem *, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, struct spdk_nvmf_poll_group *, &group);

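	/*
	 * Each failure case below corrupts one field and restores it afterwards.
	 * When the connect fails with SPDK_NVMF_FABRIC_SC_INVALID_PARAM, the
	 * iattr/ipo pair in the response identifies the offending field: iattr 1
	 * points into the connect data (ipo 16 = cntlid, 256 = subnqn,
	 * 512 = hostnqn), while iattr 0 points into the connect command itself
	 * (ipo 42 = qid, 44 = sqsize).
	 */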
	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Unterminated subnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.subnqn, 'a', sizeof(connect_data.subnqn));
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	strncpy(connect_data.subnqn, subnqn, sizeof(connect_data.subnqn));

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, struct spdk_nvmf_subsystem *, NULL);
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, struct spdk_nvmf_subsystem *, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	strncpy(connect_data.hostnqn, hostnqn, sizeof(connect_data.hostnqn));

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, bool, false);
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, bool, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 64;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

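	/*
	 * I/O queue connects (qid != 0) look up an existing controller by cntlid
	 * instead of creating one, so spdk_nvmf_subsystem_get_ctrlr is mocked to
	 * return the locally built ctrlr (and later NULL to simulate a stale cntlid).
	 */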
	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *, &ctrlr);
	cmd.connect_cmd.qid = 1;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	qpair.ctrlr = NULL;
	ctrlr.num_qpairs = 0;
	TAILQ_INIT(&ctrlr.qpairs);

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *, NULL);
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.num_qpairs = 3;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.num_qpairs = 0;

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.qid = 1;
	TAILQ_INSERT_TAIL(&ctrlr.qpairs, &qpair2, link);
	cmd.connect_cmd.qid = 1;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT(qpair.ctrlr == NULL);
	TAILQ_INIT(&ctrlr.qpairs);

	/* Clean up globals */
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, struct spdk_nvmf_subsystem *, NULL);
	MOCK_SET(spdk_nvmf_poll_group_create, struct spdk_nvmf_poll_group *, NULL);
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "get_log_page", test_get_log_page) == NULL ||
		CU_add_test(suite, "process_fabrics_cmd", test_process_fabrics_cmd) == NULL ||
		CU_add_test(suite, "connect", test_connect) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}