/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, struct iovec *iov, uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB(spdk_nvmf_request_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    -1);

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(spdk_nvmf_transport_qpair_set_sqsize,
	    int,
	    (struct spdk_nvmf_qpair *qpair),
	    0);

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

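/*
 * A fabrics command other than Connect on a qpair that has no controller
 * yet must fail with COMMAND SEQUENCE ERROR; this is exercised below with
 * a Property Get.
 */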
static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = spdk_nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

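	/* Aim the lookup stubs at the local fixtures so Connect can resolve the subsystem and poll group. */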
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Unterminated subnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.subnqn, 'a', sizeof(connect_data.subnqn));
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 64;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

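	/*
	 * The remaining cases are I/O queue connects (qid != 0); they resolve
	 * the existing controller through the spdk_nvmf_subsystem_get_ctrlr
	 * stub and require an admin qpair.
	 */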
	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	qpair.ctrlr = NULL;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, NULL);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

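	/* Release the qpair mask allocated with spdk_bit_array_create() at the start of this test. */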
	spdk_bit_array_free(&ctrlr.qpair_mask);
}

static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "get_log_page", test_get_log_page) == NULL ||
		CU_add_test(suite, "process_fabrics_cmd", test_process_fabrics_cmd) == NULL ||
		CU_add_test(suite, "connect", test_connect) == NULL ||
		CU_add_test(suite, "get_ns_id_desc_list", test_get_ns_id_desc_list) == NULL ||
		CU_add_test(suite, "identify_ns", test_identify_ns) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}