/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"

#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

struct spdk_bdev {
	int ut_mock;
};
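
/*
 * ctrlr.c is compiled directly into this unit test via the #include above.
 * The stubs below satisfy its external dependencies so the file links
 * standalone; individual tests override the default return values with
 * MOCK_SET() where needed.
 */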
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL)

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL)

DEFINE_STUB_V(spdk_nvmf_poll_group_destroy,
	      (struct spdk_nvmf_poll_group *group))

DEFINE_STUB_V(spdk_nvmf_transport_qpair_fini,
	      (struct spdk_nvmf_qpair *qpair))

DEFINE_STUB(spdk_nvmf_poll_group_add,
	    int,
	    (struct spdk_nvmf_poll_group *group, struct spdk_nvmf_qpair *qpair),
	    0)

DEFINE_STUB(spdk_nvmf_poll_group_remove,
	    int,
	    (struct spdk_nvmf_poll_group *group, struct spdk_nvmf_qpair *qpair),
	    0)

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    NULL)

DEFINE_STUB(spdk_nvmf_subsystem_get_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, uint32_t nsid),
	    NULL)

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL)

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL)

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true)

DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0)

DEFINE_STUB_V(spdk_nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr))

DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL)

DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false)

DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false)

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_identify_ns,
	    int,
	    (struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata),
	    -1)

DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, void *buffer, uint64_t offset, uint32_t length))

DEFINE_STUB(spdk_nvmf_request_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    -1)

DEFINE_STUB(spdk_nvmf_request_abort,
	    int,
	    (struct spdk_nvmf_request *req),
	    -1)

static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);
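
	/*
	 * Get Log Page CDW10 layout: the log page ID sits in bits 7:0 and the
	 * zero-based number of dwords to return starts at bit 16, so
	 * (req.length / 4 - 1) << 16 requests exactly sizeof(data) bytes.
	 */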
	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = spdk_nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}
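
/*
 * Run thread messages inline on the calling thread so that the CONNECT
 * handling exercised by test_connect() completes synchronously and its
 * results can be asserted immediately after spdk_nvmf_ctrlr_connect().
 */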
void
spdk_thread_send_msg(const struct spdk_thread *thread, spdk_thread_fn fn, void *ctx)
{
	fn(ctx);
}

static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));

	memset(&ctrlr, 0, sizeof(ctrlr));
	TAILQ_INIT(&ctrlr.qpairs);
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;
	ctrlr.max_qpairs_allowed = 3;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	TAILQ_INSERT_TAIL(&ctrlr.qpairs, &admin_qpair, link);
	admin_qpair.group = &group;

	memset(&tgt, 0, sizeof(tgt));
	tgt.opts.max_queue_depth = 64;
	tgt.opts.max_qpairs_per_ctrlr = 3;

	memset(&transport, 0, sizeof(transport));
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, struct spdk_nvmf_subsystem *, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, struct spdk_nvmf_poll_group *, &group);
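
	/*
	 * The invalid-parameter cases below check the reported IPO (invalid
	 * parameter offset): cntlid (16), subnqn (256), and hostnqn (512) are
	 * offsets within the CONNECT data, while qid (42) and sqsize (44) are
	 * offsets within the CONNECT command itself. sqsize is zero-based, so
	 * 31 requests a queue depth of 32, within max_queue_depth of 64.
	 */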
	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Unterminated subnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.subnqn, 'a', sizeof(connect_data.subnqn));
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, struct spdk_nvmf_subsystem *, NULL);
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, struct spdk_nvmf_subsystem *, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, bool, false);
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, bool, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 64;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;
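
	/*
	 * An I/O queue connect (qid != 0) looks up the controller created by a
	 * prior admin connect via its cntlid; the test models that by mocking
	 * spdk_nvmf_subsystem_get_ctrlr() to return the local ctrlr.
	 */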
	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *, &ctrlr);
	cmd.connect_cmd.qid = 1;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	qpair.ctrlr = NULL;
	ctrlr.num_qpairs = 0;
	TAILQ_INIT(&ctrlr.qpairs);

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *, NULL);
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.num_qpairs = 3;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.num_qpairs = 0;

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	/* Insert qpair2 (which already owns qid 1) so the new connect with qid 1 collides. */
	TAILQ_INSERT_TAIL(&ctrlr.qpairs, &qpair2, link);
	cmd.connect_cmd.qid = 1;
	rc = spdk_nvmf_ctrlr_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT(qpair.ctrlr == NULL);
	TAILQ_INIT(&ctrlr.qpairs);

	/* Clean up globals */
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, struct spdk_nvmf_subsystem *, NULL);
	MOCK_SET(spdk_nvmf_poll_group_create, struct spdk_nvmf_poll_group *, NULL);
}

static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
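
	/*
	 * Each entry in the returned Namespace Identification Descriptor list
	 * is a 4-byte header (NIDT, NIDL, two reserved bytes) followed by NIDL
	 * bytes of identifier, so the EUI64 descriptor occupies bytes 0-11,
	 * NGUID the next 20 bytes, and UUID the 20 bytes after that when all
	 * three are present. The assertions below check those offsets in buf.
	 */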
	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "get_log_page", test_get_log_page) == NULL ||
		CU_add_test(suite, "process_fabrics_cmd", test_process_fabrics_cmd) == NULL ||
		CU_add_test(suite, "connect", test_connect) == NULL ||
		CU_add_test(suite, "get_ns_id_desc_list", test_get_ns_id_desc_list) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}