1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

/* Compile the code under test (and the multithread unit-test helpers)
 * directly into this translation unit so the tests can call its
 * internal functions and override its external dependencies below.
 */
#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

/* Minimal stand-in for the real struct spdk_bdev: the tests only need a
 * block count (see spdk_nvmf_bdev_ctrlr_identify_ns below).
 */
struct spdk_bdev {
    int ut_mock;
    uint64_t blockcnt;
};

/* Stubs for ctrlr.c's external dependencies.  Each DEFINE_STUB provides a
 * mockable function returning the given default; tests override the return
 * value with MOCK_SET() where needed.
 */
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
    struct spdk_nvmf_subsystem *,
    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
    struct spdk_nvmf_poll_group *,
    (struct spdk_nvmf_tgt *tgt),
    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
    const char *,
    (const struct spdk_nvmf_subsystem *subsystem),
    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
    const char *,
    (const struct spdk_nvmf_subsystem *subsystem),
    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
    struct spdk_nvmf_ns *,
    (struct spdk_nvmf_subsystem *subsystem),
    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
    struct spdk_nvmf_ns *,
    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
    bool,
    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
    true);

DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr,
    int,
    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
    0);

DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr,
    struct spdk_nvmf_ctrlr *,
    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
    NULL);

DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported,
    bool,
    (struct spdk_nvmf_ctrlr *ctrlr),
    false);

DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported,
    bool,
    (struct spdk_nvmf_ctrlr *ctrlr),
    false);

DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page,
    (struct spdk_nvmf_tgt *tgt, struct iovec *iov, uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, 109 int, 110 (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 111 0); 112 113 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed, 114 bool, 115 (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvme_transport_id *trid), 116 true); 117 118 DEFINE_STUB(spdk_nvmf_transport_qpair_set_sqsize, 119 int, 120 (struct spdk_nvmf_qpair *qpair), 121 0); 122 123 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_read_cmd, 124 int, 125 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 126 struct spdk_nvmf_request *req), 127 0); 128 129 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_cmd, 130 int, 131 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 132 struct spdk_nvmf_request *req), 133 0); 134 135 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_zeroes_cmd, 136 int, 137 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 138 struct spdk_nvmf_request *req), 139 0); 140 141 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_flush_cmd, 142 int, 143 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 144 struct spdk_nvmf_request *req), 145 0); 146 147 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_dsm_cmd, 148 int, 149 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 150 struct spdk_nvmf_request *req), 151 0); 152 153 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_io, 154 int, 155 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 156 struct spdk_nvmf_request *req), 157 0); 158 159 DEFINE_STUB(spdk_nvmf_transport_req_complete, 160 int, 161 (struct spdk_nvmf_request *req), 162 0); 163 164 DEFINE_STUB_V(spdk_nvmf_ns_reservation_request, (void *ctx)); 165 166 int 167 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx) 168 { 169 return 0; 170 } 171 172 void 173 spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata) 174 
{ 175 uint64_t num_blocks; 176 177 SPDK_CU_ASSERT_FATAL(ns->bdev != NULL); 178 num_blocks = ns->bdev->blockcnt; 179 nsdata->nsze = num_blocks; 180 nsdata->ncap = num_blocks; 181 nsdata->nuse = num_blocks; 182 nsdata->nlbaf = 0; 183 nsdata->flbas.format = 0; 184 nsdata->lbaf[0].lbads = spdk_u32log2(512); 185 } 186 187 static void 188 test_get_log_page(void) 189 { 190 struct spdk_nvmf_subsystem subsystem = {}; 191 struct spdk_nvmf_request req = {}; 192 struct spdk_nvmf_qpair qpair = {}; 193 struct spdk_nvmf_ctrlr ctrlr = {}; 194 union nvmf_h2c_msg cmd = {}; 195 union nvmf_c2h_msg rsp = {}; 196 char data[4096]; 197 198 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 199 200 ctrlr.subsys = &subsystem; 201 202 qpair.ctrlr = &ctrlr; 203 204 req.qpair = &qpair; 205 req.cmd = &cmd; 206 req.rsp = &rsp; 207 req.data = &data; 208 req.length = sizeof(data); 209 210 /* Get Log Page - all valid */ 211 memset(&cmd, 0, sizeof(cmd)); 212 memset(&rsp, 0, sizeof(rsp)); 213 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 214 cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16; 215 CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 216 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 217 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 218 219 /* Get Log Page with invalid log ID */ 220 memset(&cmd, 0, sizeof(cmd)); 221 memset(&rsp, 0, sizeof(rsp)); 222 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 223 cmd.nvme_cmd.cdw10 = 0; 224 CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 225 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 226 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 227 228 /* Get Log Page with invalid offset (not dword aligned) */ 229 memset(&cmd, 0, sizeof(cmd)); 230 memset(&rsp, 0, sizeof(rsp)); 231 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 232 cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 
1) << 16; 233 cmd.nvme_cmd.cdw12 = 2; 234 CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 235 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 236 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 237 238 /* Get Log Page without data buffer */ 239 memset(&cmd, 0, sizeof(cmd)); 240 memset(&rsp, 0, sizeof(rsp)); 241 req.data = NULL; 242 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 243 cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16; 244 CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 245 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 246 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 247 req.data = data; 248 } 249 250 static void 251 test_process_fabrics_cmd(void) 252 { 253 struct spdk_nvmf_request req = {}; 254 int ret; 255 struct spdk_nvmf_qpair req_qpair = {}; 256 union nvmf_h2c_msg req_cmd = {}; 257 union nvmf_c2h_msg req_rsp = {}; 258 259 req.qpair = &req_qpair; 260 req.cmd = &req_cmd; 261 req.rsp = &req_rsp; 262 req.qpair->ctrlr = NULL; 263 264 /* No ctrlr and invalid command check */ 265 req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET; 266 ret = spdk_nvmf_ctrlr_process_fabrics_cmd(&req); 267 CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 268 CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 269 } 270 271 static bool 272 nvme_status_success(const struct spdk_nvme_status *status) 273 { 274 return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS; 275 } 276 277 static void 278 test_connect(void) 279 { 280 struct spdk_nvmf_fabric_connect_data connect_data; 281 struct spdk_nvmf_poll_group group; 282 struct spdk_nvmf_subsystem_poll_group *sgroups; 283 struct spdk_nvmf_transport transport; 284 struct spdk_nvmf_subsystem subsystem; 285 struct spdk_nvmf_request req; 286 struct spdk_nvmf_qpair 
admin_qpair; 287 struct spdk_nvmf_qpair qpair; 288 struct spdk_nvmf_qpair qpair2; 289 struct spdk_nvmf_ctrlr ctrlr; 290 struct spdk_nvmf_tgt tgt; 291 union nvmf_h2c_msg cmd; 292 union nvmf_c2h_msg rsp; 293 const uint8_t hostid[16] = { 294 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 295 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 296 }; 297 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 298 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 299 int rc; 300 301 memset(&group, 0, sizeof(group)); 302 group.thread = spdk_get_thread(); 303 304 memset(&ctrlr, 0, sizeof(ctrlr)); 305 ctrlr.subsys = &subsystem; 306 ctrlr.qpair_mask = spdk_bit_array_create(3); 307 SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL); 308 ctrlr.vcprop.cc.bits.en = 1; 309 ctrlr.vcprop.cc.bits.iosqes = 6; 310 ctrlr.vcprop.cc.bits.iocqes = 4; 311 312 memset(&admin_qpair, 0, sizeof(admin_qpair)); 313 admin_qpair.group = &group; 314 315 memset(&tgt, 0, sizeof(tgt)); 316 memset(&transport, 0, sizeof(transport)); 317 transport.opts.max_aq_depth = 32; 318 transport.opts.max_queue_depth = 64; 319 transport.opts.max_qpairs_per_ctrlr = 3; 320 transport.tgt = &tgt; 321 322 memset(&qpair, 0, sizeof(qpair)); 323 qpair.transport = &transport; 324 qpair.group = &group; 325 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 326 TAILQ_INIT(&qpair.outstanding); 327 328 memset(&connect_data, 0, sizeof(connect_data)); 329 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 330 connect_data.cntlid = 0xFFFF; 331 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 332 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 333 334 memset(&subsystem, 0, sizeof(subsystem)); 335 subsystem.thread = spdk_get_thread(); 336 subsystem.id = 1; 337 TAILQ_INIT(&subsystem.ctrlrs); 338 subsystem.tgt = &tgt; 339 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 340 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 341 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 342 343 
sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group)); 344 sgroups[subsystem.id].io_outstanding = 5; 345 group.sgroups = sgroups; 346 347 memset(&cmd, 0, sizeof(cmd)); 348 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 349 cmd.connect_cmd.cid = 1; 350 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 351 cmd.connect_cmd.recfmt = 0; 352 cmd.connect_cmd.qid = 0; 353 cmd.connect_cmd.sqsize = 31; 354 cmd.connect_cmd.cattr = 0; 355 cmd.connect_cmd.kato = 120000; 356 357 memset(&req, 0, sizeof(req)); 358 req.qpair = &qpair; 359 req.length = sizeof(connect_data); 360 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 361 req.data = &connect_data; 362 req.cmd = &cmd; 363 req.rsp = &rsp; 364 365 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 366 MOCK_SET(spdk_nvmf_poll_group_create, &group); 367 368 /* Valid admin connect command */ 369 memset(&rsp, 0, sizeof(rsp)); 370 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 371 rc = spdk_nvmf_ctrlr_connect(&req); 372 poll_threads(); 373 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 374 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 375 CU_ASSERT(qpair.ctrlr != NULL); 376 spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 377 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 378 free(qpair.ctrlr); 379 qpair.ctrlr = NULL; 380 381 /* Valid admin connect command with kato = 0 */ 382 cmd.connect_cmd.kato = 0; 383 memset(&rsp, 0, sizeof(rsp)); 384 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 385 rc = spdk_nvmf_ctrlr_connect(&req); 386 poll_threads(); 387 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 388 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 389 CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL); 390 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 391 free(qpair.ctrlr); 392 qpair.ctrlr = NULL; 393 cmd.connect_cmd.kato = 120000; 394 395 /* Invalid data length */ 396 memset(&rsp, 0, sizeof(rsp)); 397 req.length = 
sizeof(connect_data) - 1; 398 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 399 rc = spdk_nvmf_ctrlr_connect(&req); 400 poll_threads(); 401 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 402 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 403 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 404 CU_ASSERT(qpair.ctrlr == NULL); 405 req.length = sizeof(connect_data); 406 407 /* Invalid recfmt */ 408 memset(&rsp, 0, sizeof(rsp)); 409 cmd.connect_cmd.recfmt = 1234; 410 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 411 rc = spdk_nvmf_ctrlr_connect(&req); 412 poll_threads(); 413 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 414 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 415 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT); 416 CU_ASSERT(qpair.ctrlr == NULL); 417 cmd.connect_cmd.recfmt = 0; 418 419 /* Unterminated subnqn */ 420 memset(&rsp, 0, sizeof(rsp)); 421 memset(connect_data.subnqn, 'a', sizeof(connect_data.subnqn)); 422 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 423 rc = spdk_nvmf_ctrlr_connect(&req); 424 poll_threads(); 425 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 426 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 427 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 428 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 429 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256); 430 CU_ASSERT(qpair.ctrlr == NULL); 431 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 432 433 /* Subsystem not found */ 434 memset(&rsp, 0, sizeof(rsp)); 435 MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL); 436 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 437 rc = spdk_nvmf_ctrlr_connect(&req); 438 poll_threads(); 439 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 440 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 441 
CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 442 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 443 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256); 444 CU_ASSERT(qpair.ctrlr == NULL); 445 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 446 447 /* Unterminated hostnqn */ 448 memset(&rsp, 0, sizeof(rsp)); 449 memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn)); 450 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 451 rc = spdk_nvmf_ctrlr_connect(&req); 452 poll_threads(); 453 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 454 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 455 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 456 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 457 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512); 458 CU_ASSERT(qpair.ctrlr == NULL); 459 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 460 461 /* Host not allowed */ 462 memset(&rsp, 0, sizeof(rsp)); 463 MOCK_SET(spdk_nvmf_subsystem_host_allowed, false); 464 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 465 rc = spdk_nvmf_ctrlr_connect(&req); 466 poll_threads(); 467 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 468 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 469 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST); 470 CU_ASSERT(qpair.ctrlr == NULL); 471 MOCK_SET(spdk_nvmf_subsystem_host_allowed, true); 472 473 /* Invalid sqsize == 0 */ 474 memset(&rsp, 0, sizeof(rsp)); 475 cmd.connect_cmd.sqsize = 0; 476 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 477 rc = spdk_nvmf_ctrlr_connect(&req); 478 poll_threads(); 479 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 480 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 481 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 482 
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 483 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 484 CU_ASSERT(qpair.ctrlr == NULL); 485 cmd.connect_cmd.sqsize = 31; 486 487 /* Invalid admin sqsize > max_aq_depth */ 488 memset(&rsp, 0, sizeof(rsp)); 489 cmd.connect_cmd.sqsize = 32; 490 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 491 rc = spdk_nvmf_ctrlr_connect(&req); 492 poll_threads(); 493 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 494 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 495 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 496 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 497 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 498 CU_ASSERT(qpair.ctrlr == NULL); 499 cmd.connect_cmd.sqsize = 31; 500 501 /* Invalid I/O sqsize > max_queue_depth */ 502 memset(&rsp, 0, sizeof(rsp)); 503 cmd.connect_cmd.qid = 1; 504 cmd.connect_cmd.sqsize = 64; 505 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 506 rc = spdk_nvmf_ctrlr_connect(&req); 507 poll_threads(); 508 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 509 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 510 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 511 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 512 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 513 CU_ASSERT(qpair.ctrlr == NULL); 514 cmd.connect_cmd.qid = 0; 515 cmd.connect_cmd.sqsize = 31; 516 517 /* Invalid cntlid for admin queue */ 518 memset(&rsp, 0, sizeof(rsp)); 519 connect_data.cntlid = 0x1234; 520 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 521 rc = spdk_nvmf_ctrlr_connect(&req); 522 poll_threads(); 523 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 524 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 525 CU_ASSERT(rsp.nvme_cpl.status.sc == 
SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 526 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 527 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 528 CU_ASSERT(qpair.ctrlr == NULL); 529 connect_data.cntlid = 0xFFFF; 530 531 ctrlr.admin_qpair = &admin_qpair; 532 ctrlr.subsys = &subsystem; 533 534 /* Valid I/O queue connect command */ 535 memset(&rsp, 0, sizeof(rsp)); 536 MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr); 537 cmd.connect_cmd.qid = 1; 538 cmd.connect_cmd.sqsize = 63; 539 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 540 rc = spdk_nvmf_ctrlr_connect(&req); 541 poll_threads(); 542 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 543 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 544 CU_ASSERT(qpair.ctrlr == &ctrlr); 545 qpair.ctrlr = NULL; 546 cmd.connect_cmd.sqsize = 31; 547 548 /* Non-existent controller */ 549 memset(&rsp, 0, sizeof(rsp)); 550 MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, NULL); 551 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 552 rc = spdk_nvmf_ctrlr_connect(&req); 553 poll_threads(); 554 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 555 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 556 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 557 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 558 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 559 CU_ASSERT(qpair.ctrlr == NULL); 560 MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr); 561 562 /* I/O connect to discovery controller */ 563 memset(&rsp, 0, sizeof(rsp)); 564 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 565 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 566 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 567 rc = spdk_nvmf_ctrlr_connect(&req); 568 poll_threads(); 569 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 570 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 571 
CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 572 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 573 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 574 CU_ASSERT(qpair.ctrlr == NULL); 575 576 /* I/O connect to discovery controller keep-alive-timeout should be 0 */ 577 cmd.connect_cmd.qid = 0; 578 memset(&rsp, 0, sizeof(rsp)); 579 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 580 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 581 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 582 rc = spdk_nvmf_ctrlr_connect(&req); 583 poll_threads(); 584 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 585 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 586 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR); 587 CU_ASSERT(qpair.ctrlr == NULL); 588 cmd.connect_cmd.qid = 1; 589 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 590 591 /* I/O connect to disabled controller */ 592 memset(&rsp, 0, sizeof(rsp)); 593 ctrlr.vcprop.cc.bits.en = 0; 594 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 595 rc = spdk_nvmf_ctrlr_connect(&req); 596 poll_threads(); 597 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 598 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 599 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 600 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 601 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 602 CU_ASSERT(qpair.ctrlr == NULL); 603 ctrlr.vcprop.cc.bits.en = 1; 604 605 /* I/O connect with invalid IOSQES */ 606 memset(&rsp, 0, sizeof(rsp)); 607 ctrlr.vcprop.cc.bits.iosqes = 3; 608 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 609 rc = spdk_nvmf_ctrlr_connect(&req); 610 poll_threads(); 611 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 612 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 613 CU_ASSERT(rsp.nvme_cpl.status.sc == 
SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 614 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 615 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 616 CU_ASSERT(qpair.ctrlr == NULL); 617 ctrlr.vcprop.cc.bits.iosqes = 6; 618 619 /* I/O connect with invalid IOCQES */ 620 memset(&rsp, 0, sizeof(rsp)); 621 ctrlr.vcprop.cc.bits.iocqes = 3; 622 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 623 rc = spdk_nvmf_ctrlr_connect(&req); 624 poll_threads(); 625 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 626 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 627 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 628 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 629 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 630 CU_ASSERT(qpair.ctrlr == NULL); 631 ctrlr.vcprop.cc.bits.iocqes = 4; 632 633 /* I/O connect with too many existing qpairs */ 634 memset(&rsp, 0, sizeof(rsp)); 635 spdk_bit_array_set(ctrlr.qpair_mask, 0); 636 spdk_bit_array_set(ctrlr.qpair_mask, 1); 637 spdk_bit_array_set(ctrlr.qpair_mask, 2); 638 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 639 rc = spdk_nvmf_ctrlr_connect(&req); 640 poll_threads(); 641 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 642 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 643 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 644 CU_ASSERT(qpair.ctrlr == NULL); 645 spdk_bit_array_clear(ctrlr.qpair_mask, 0); 646 spdk_bit_array_clear(ctrlr.qpair_mask, 1); 647 spdk_bit_array_clear(ctrlr.qpair_mask, 2); 648 649 /* I/O connect with duplicate queue ID */ 650 memset(&rsp, 0, sizeof(rsp)); 651 memset(&qpair2, 0, sizeof(qpair2)); 652 qpair2.group = &group; 653 qpair2.qid = 1; 654 spdk_bit_array_set(ctrlr.qpair_mask, 1); 655 cmd.connect_cmd.qid = 1; 656 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 657 rc = spdk_nvmf_ctrlr_connect(&req); 658 
poll_threads(); 659 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 660 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 661 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 662 CU_ASSERT(qpair.ctrlr == NULL); 663 664 /* Clean up globals */ 665 MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem); 666 MOCK_CLEAR(spdk_nvmf_poll_group_create); 667 668 spdk_bit_array_free(&ctrlr.qpair_mask); 669 free(sgroups); 670 } 671 672 static void 673 test_get_ns_id_desc_list(void) 674 { 675 struct spdk_nvmf_subsystem subsystem; 676 struct spdk_nvmf_qpair qpair; 677 struct spdk_nvmf_ctrlr ctrlr; 678 struct spdk_nvmf_request req; 679 struct spdk_nvmf_ns *ns_ptrs[1]; 680 struct spdk_nvmf_ns ns; 681 union nvmf_h2c_msg cmd; 682 union nvmf_c2h_msg rsp; 683 struct spdk_bdev bdev; 684 uint8_t buf[4096]; 685 686 memset(&subsystem, 0, sizeof(subsystem)); 687 ns_ptrs[0] = &ns; 688 subsystem.ns = ns_ptrs; 689 subsystem.max_nsid = 1; 690 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 691 692 memset(&ns, 0, sizeof(ns)); 693 ns.opts.nsid = 1; 694 ns.bdev = &bdev; 695 696 memset(&qpair, 0, sizeof(qpair)); 697 qpair.ctrlr = &ctrlr; 698 699 memset(&ctrlr, 0, sizeof(ctrlr)); 700 ctrlr.subsys = &subsystem; 701 ctrlr.vcprop.cc.bits.en = 1; 702 703 memset(&req, 0, sizeof(req)); 704 req.qpair = &qpair; 705 req.cmd = &cmd; 706 req.rsp = &rsp; 707 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 708 req.data = buf; 709 req.length = sizeof(buf); 710 711 memset(&cmd, 0, sizeof(cmd)); 712 cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY; 713 cmd.nvme_cmd.cdw10 = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST; 714 715 /* Invalid NSID */ 716 cmd.nvme_cmd.nsid = 0; 717 memset(&rsp, 0, sizeof(rsp)); 718 CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 719 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 720 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 721 722 /* Valid NSID, but ns has no IDs 
defined */ 723 cmd.nvme_cmd.nsid = 1; 724 memset(&rsp, 0, sizeof(rsp)); 725 CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 726 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 727 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 728 CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf))); 729 730 /* Valid NSID, only EUI64 defined */ 731 ns.opts.eui64[0] = 0x11; 732 ns.opts.eui64[7] = 0xFF; 733 memset(&rsp, 0, sizeof(rsp)); 734 CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 735 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 736 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 737 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 738 CU_ASSERT(buf[1] == 8); 739 CU_ASSERT(buf[4] == 0x11); 740 CU_ASSERT(buf[11] == 0xFF); 741 CU_ASSERT(buf[13] == 0); 742 743 /* Valid NSID, only NGUID defined */ 744 memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64)); 745 ns.opts.nguid[0] = 0x22; 746 ns.opts.nguid[15] = 0xEE; 747 memset(&rsp, 0, sizeof(rsp)); 748 CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 749 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 750 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 751 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID); 752 CU_ASSERT(buf[1] == 16); 753 CU_ASSERT(buf[4] == 0x22); 754 CU_ASSERT(buf[19] == 0xEE); 755 CU_ASSERT(buf[21] == 0); 756 757 /* Valid NSID, both EUI64 and NGUID defined */ 758 ns.opts.eui64[0] = 0x11; 759 ns.opts.eui64[7] = 0xFF; 760 ns.opts.nguid[0] = 0x22; 761 ns.opts.nguid[15] = 0xEE; 762 memset(&rsp, 0, sizeof(rsp)); 763 CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 764 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 765 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 766 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 767 CU_ASSERT(buf[1] == 8); 768 CU_ASSERT(buf[4] == 
0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	/* Descriptor list layout: EUI64 (type+len header at [0..3], 8 data bytes),
	 * then NGUID (header at [12..15], 16 data bytes), then UUID (header at
	 * [32..35], 16 data bytes), terminated by a zero byte. */
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

/* Exercise spdk_nvmf_ctrlr_identify_ns() across NSID 0 (invalid), an active
 * namespace, an inactive slot (NULL entry), a namespace with no bdev, and an
 * out-of-range NSID. Inactive/invalid NSIDs must leave nsdata all zeroes. */
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

/*
 * Reservation Unit Test Configuration
 *         --------             --------    --------
 *        | Host A |           | Host B |  | Host C |
 *         --------             --------    --------
 *        /        \               |           |
 *  --------   --------        -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|      |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------        -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

/* Reset the shared reservation state used by the reservation tests: give each
 * host a fresh hostid (Host A's two controllers share one), register hosts
 * A, B, and C on the namespace, and set the reservation type under test. */
static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

/* Write Exclusive: only the holder (Host A) may write; reads are allowed
 * from any host, including a non-registrant. */
static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

/* Exclusive Access: a non-holder registrant's read conflicts, but a
 * Reservation Release command from a registrant is still allowed. */
static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

/* Write Exclusive - Registrants Only / All Registrants: registrants (Host C)
 * may write; after unregistration Host C may still read but not write. */
static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

/* Exclusive Access - Registrants Only / All Registrants: a registrant
 * (Host B) may write; once unregistered, both reads and writes conflict. */
static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

/* Verify reservation notification log pages: masked notification types
 * produce no log entries; unmasked types each queue a log page and trigger
 * the AER completion; reading the log page clears the queued entries. */
static void
test_reservation_notification_log_page(void)
{
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ns ns;
	struct spdk_nvmf_request req;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	union spdk_nvme_async_event_completion event = {0};
	struct spdk_nvme_reservation_notification_log logs[3];

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.thread = spdk_get_thread();
	TAILQ_INIT(&ctrlr.log_head);
	ns.nsid = 1;

	/* Test Case: Mask all the reservation notifications */
	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
		  SPDK_NVME_RESERVATION_RELEASED_MASK |
		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_REGISTRATION_PREEMPTED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_RELEASED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));

	/* Test Case: Unmask all the reservation notifications,
	 * 3 log pages are generated, and AER was triggered.
	 */
	ns.mask = 0;
	ctrlr.num_avail_log_pages = 0;
	req.cmd = &cmd;
	req.rsp = &rsp;
	ctrlr.aer_req = &req;
	req.qpair = &qpair;
	TAILQ_INIT(&qpair.outstanding);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);

	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_REGISTRATION_PREEMPTED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_RELEASED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	/* The AER completion reports the async event in cdw0. */
	event.raw = rsp.nvme_cpl.cdw0;
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);

	/* Test Case: Get Log Page to clear the log pages */
	spdk_nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
}

/* Register and run the test suite; returns the CUnit failure count so the
 * process exit code reflects test success. */
int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "get_log_page", test_get_log_page) == NULL ||
		CU_add_test(suite, "process_fabrics_cmd", test_process_fabrics_cmd) == NULL ||
		CU_add_test(suite, "connect", test_connect) == NULL ||
		CU_add_test(suite, "get_ns_id_desc_list", test_get_ns_id_desc_list) == NULL ||
		CU_add_test(suite, "identify_ns", test_identify_ns) == NULL ||
		CU_add_test(suite, "reservation_write_exclusive", test_reservation_write_exclusive) == NULL ||
		CU_add_test(suite, "reservation_exclusive_access", test_reservation_exclusive_access) == NULL ||
		CU_add_test(suite, "reservation_write_exclusive_regs_only_and_all_regs",
			    test_reservation_write_exclusive_regs_only_and_all_regs) == NULL ||
		CU_add_test(suite, "reservation_exclusive_access_regs_only_and_all_regs",
			    test_reservation_exclusive_access_regs_only_and_all_regs) == NULL ||
		CU_add_test(suite, "reservation_notification_log_page",
			    test_reservation_notification_log_page) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	/* Single-threaded SPDK thread environment for the ut_multithread helpers. */
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}