/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

struct spdk_bdev {
        int ut_mock;
        uint64_t blockcnt;
};

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
            struct spdk_nvmf_subsystem *,
            (struct spdk_nvmf_tgt *tgt, const char *subnqn),
            NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
            struct spdk_nvmf_poll_group *,
            (struct spdk_nvmf_tgt *tgt),
            NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
            const char *,
            (const struct spdk_nvmf_subsystem *subsystem),
            NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
            struct spdk_nvmf_ns *,
            (struct spdk_nvmf_subsystem *subsystem),
            NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
            struct spdk_nvmf_ns *,
            (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
            NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
            bool,
            (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
            true);

DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr,
            int,
            (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
            0);

DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr,
            struct spdk_nvmf_ctrlr *,
            (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
            NULL);

DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported,
            bool,
            (struct spdk_nvmf_ctrlr *ctrlr),
            false);

DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported,
            bool,
            (struct spdk_nvmf_ctrlr *ctrlr),
            false);

DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page,
              (struct spdk_nvmf_tgt *tgt, struct iovec *iov, uint32_t iovcnt, uint64_t offset, uint32_t length));
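
/*
 * The DEFINE_STUB()s above and below give these external dependencies fixed
 * default return values; tests that need different behavior override them at
 * runtime with MOCK_SET() and clear the overrides with MOCK_CLEAR().
 */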

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
            int,
            (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
            0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
            bool,
            (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvme_transport_id *trid),
            true);

DEFINE_STUB(spdk_nvmf_transport_qpair_set_sqsize,
            int,
            (struct spdk_nvmf_qpair *qpair),
            0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_read_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_zeroes_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_flush_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_dsm_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_io,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(spdk_nvmf_transport_req_complete,
            int,
            (struct spdk_nvmf_request *req),
            0);

DEFINE_STUB_V(spdk_nvmf_ns_reservation_request, (void *ctx));

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
        return 0;
}

void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata)
{
        uint64_t num_blocks;

        SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
        num_blocks = ns->bdev->blockcnt;
        nsdata->nsze = num_blocks;
        nsdata->ncap = num_blocks;
        nsdata->nuse = num_blocks;
        nsdata->nlbaf = 0;
        nsdata->flbas.format = 0;
        nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

static void
test_get_log_page(void)
{
        struct spdk_nvmf_subsystem subsystem = {};
        struct spdk_nvmf_request req = {};
        struct spdk_nvmf_qpair qpair = {};
        struct spdk_nvmf_ctrlr ctrlr = {};
        union nvmf_h2c_msg cmd = {};
        union nvmf_c2h_msg rsp = {};
        char data[4096];

        subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

        ctrlr.subsys = &subsystem;

        qpair.ctrlr = &ctrlr;

        req.qpair = &qpair;
        req.cmd = &cmd;
        req.rsp = &rsp;
        req.data = &data;
        req.length = sizeof(data);
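
        /*
         * For Get Log Page, CDW10 carries the log page ID in its low byte and
         * NUMDL (number of dwords to transfer, 0's based) in bits 31:16, which
         * is why the valid cases below use (req.length / 4 - 1) << 16.
         */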

        /* Get Log Page - all valid */
        memset(&cmd, 0, sizeof(cmd));
        memset(&rsp, 0, sizeof(rsp));
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
        cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
        CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

        /* Get Log Page with invalid log ID */
        memset(&cmd, 0, sizeof(cmd));
        memset(&rsp, 0, sizeof(rsp));
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
        cmd.nvme_cmd.cdw10 = 0;
        CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

        /* Get Log Page with invalid offset (not dword aligned) */
        memset(&cmd, 0, sizeof(cmd));
        memset(&rsp, 0, sizeof(rsp));
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
        cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
        cmd.nvme_cmd.cdw12 = 2;
        CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

        /* Get Log Page without data buffer */
        memset(&cmd, 0, sizeof(cmd));
        memset(&rsp, 0, sizeof(rsp));
        req.data = NULL;
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
        cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
        CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
        req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
        struct spdk_nvmf_request req = {};
        int ret;
        struct spdk_nvmf_qpair req_qpair = {};
        union nvmf_h2c_msg req_cmd = {};
        union nvmf_c2h_msg req_rsp = {};

        req.qpair = &req_qpair;
        req.cmd = &req_cmd;
        req.rsp = &req_rsp;
        req.qpair->ctrlr = NULL;

        /* No ctrlr and invalid command check */
        req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
        ret = spdk_nvmf_ctrlr_process_fabrics_cmd(&req);
        CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
        CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
        return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

static void
test_connect(void)
{
        struct spdk_nvmf_fabric_connect_data connect_data;
        struct spdk_nvmf_poll_group group;
        struct spdk_nvmf_transport transport;
        struct spdk_nvmf_subsystem subsystem;
        struct spdk_nvmf_request req;
        struct spdk_nvmf_qpair admin_qpair;
        struct spdk_nvmf_qpair qpair;
        struct spdk_nvmf_qpair qpair2;
        struct spdk_nvmf_ctrlr ctrlr;
        struct spdk_nvmf_tgt tgt;
        union nvmf_h2c_msg cmd;
        union nvmf_c2h_msg rsp;
        const uint8_t hostid[16] = {
                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
        };
        const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
        const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
        int rc;

        memset(&group, 0, sizeof(group));
        group.thread = spdk_get_thread();

        memset(&ctrlr, 0, sizeof(ctrlr));
        ctrlr.subsys = &subsystem;
        ctrlr.qpair_mask = spdk_bit_array_create(3);
        SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
        ctrlr.vcprop.cc.bits.en = 1;
        ctrlr.vcprop.cc.bits.iosqes = 6;
        ctrlr.vcprop.cc.bits.iocqes = 4;

        memset(&admin_qpair, 0, sizeof(admin_qpair));
        admin_qpair.group = &group;

        memset(&tgt, 0, sizeof(tgt));
        memset(&transport, 0, sizeof(transport));
        transport.opts.max_queue_depth = 64;
        transport.opts.max_qpairs_per_ctrlr = 3;
        transport.tgt = &tgt;

        memset(&qpair, 0, sizeof(qpair));
        qpair.transport = &transport;
        qpair.group = &group;
        qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
        TAILQ_INIT(&qpair.outstanding);

        memset(&connect_data, 0, sizeof(connect_data));
        memcpy(connect_data.hostid, hostid, sizeof(hostid));
        connect_data.cntlid = 0xFFFF;
        snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
        snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

        memset(&subsystem, 0, sizeof(subsystem));
        subsystem.thread = spdk_get_thread();
        subsystem.id = 1;
        TAILQ_INIT(&subsystem.ctrlrs);
        subsystem.tgt = &tgt;
        subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
        subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
        snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

        memset(&cmd, 0, sizeof(cmd));
        cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
        cmd.connect_cmd.cid = 1;
        cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
        cmd.connect_cmd.recfmt = 0;
        cmd.connect_cmd.qid = 0;
        cmd.connect_cmd.sqsize = 31;
        cmd.connect_cmd.cattr = 0;
        cmd.connect_cmd.kato = 120000;

        memset(&req, 0, sizeof(req));
        req.qpair = &qpair;
        req.length = sizeof(connect_data);
        req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
        req.data = &connect_data;
        req.cmd = &cmd;
        req.rsp = &rsp;

        MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
        MOCK_SET(spdk_nvmf_poll_group_create, &group);

        /* Valid admin connect command */
        memset(&rsp, 0, sizeof(rsp));
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
        CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
        CU_ASSERT(qpair.ctrlr != NULL);
        spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
        spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
        free(qpair.ctrlr);
        qpair.ctrlr = NULL;

        /* Invalid data length */
        memset(&rsp, 0, sizeof(rsp));
        req.length = sizeof(connect_data) - 1;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
        CU_ASSERT(qpair.ctrlr == NULL);
        req.length = sizeof(connect_data);

        /* Invalid recfmt */
        memset(&rsp, 0, sizeof(rsp));
        cmd.connect_cmd.recfmt = 1234;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
        CU_ASSERT(qpair.ctrlr == NULL);
        cmd.connect_cmd.recfmt = 0;

        /* Unterminated subnqn */
        memset(&rsp, 0, sizeof(rsp));
        memset(connect_data.subnqn, 'a', sizeof(connect_data.subnqn));
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
        CU_ASSERT(qpair.ctrlr == NULL);
        snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
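
        /*
         * In the invalid-parameter cases above and below, iattr selects where
         * the offending field lives (0 = connect command SQE, 1 = connect data)
         * and ipo is its byte offset: 256/512 are the subnqn/hostnqn offsets and
         * 16 the cntlid offset within the connect data, while 42 and 44 are the
         * qid and sqsize offsets within the connect command.
         */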

        /* Subsystem not found */
        memset(&rsp, 0, sizeof(rsp));
        MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
        CU_ASSERT(qpair.ctrlr == NULL);
        MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

        /* Unterminated hostnqn */
        memset(&rsp, 0, sizeof(rsp));
        memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
        CU_ASSERT(qpair.ctrlr == NULL);
        snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

        /* Host not allowed */
        memset(&rsp, 0, sizeof(rsp));
        MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
        CU_ASSERT(qpair.ctrlr == NULL);
        MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

        /* Invalid sqsize == 0 */
        memset(&rsp, 0, sizeof(rsp));
        cmd.connect_cmd.sqsize = 0;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
        CU_ASSERT(qpair.ctrlr == NULL);
        cmd.connect_cmd.sqsize = 31;

        /* Invalid sqsize > max_queue_depth */
        memset(&rsp, 0, sizeof(rsp));
        cmd.connect_cmd.sqsize = 64;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
        CU_ASSERT(qpair.ctrlr == NULL);
        cmd.connect_cmd.sqsize = 31;

        /* Invalid cntlid for admin queue */
        memset(&rsp, 0, sizeof(rsp));
        connect_data.cntlid = 0x1234;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
        CU_ASSERT(qpair.ctrlr == NULL);
        connect_data.cntlid = 0xFFFF;

        ctrlr.admin_qpair = &admin_qpair;
        ctrlr.subsys = &subsystem;

        /* Valid I/O queue connect command */
        memset(&rsp, 0, sizeof(rsp));
        MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);
        cmd.connect_cmd.qid = 1;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
        CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
        CU_ASSERT(qpair.ctrlr == &ctrlr);
        qpair.ctrlr = NULL;

        /* Non-existent controller */
        memset(&rsp, 0, sizeof(rsp));
        MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, NULL);
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
        CU_ASSERT(qpair.ctrlr == NULL);
        MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);

        /* I/O connect to discovery controller */
        memset(&rsp, 0, sizeof(rsp));
        subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
        subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
        CU_ASSERT(qpair.ctrlr == NULL);
        subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

        /* I/O connect to disabled controller */
        memset(&rsp, 0, sizeof(rsp));
        ctrlr.vcprop.cc.bits.en = 0;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
        CU_ASSERT(qpair.ctrlr == NULL);
        ctrlr.vcprop.cc.bits.en = 1;

        /* I/O connect with invalid IOSQES */
        memset(&rsp, 0, sizeof(rsp));
        ctrlr.vcprop.cc.bits.iosqes = 3;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
        CU_ASSERT(qpair.ctrlr == NULL);
        ctrlr.vcprop.cc.bits.iosqes = 6;

        /* I/O connect with invalid IOCQES */
        memset(&rsp, 0, sizeof(rsp));
        ctrlr.vcprop.cc.bits.iocqes = 3;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
        CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
        CU_ASSERT(qpair.ctrlr == NULL);
        ctrlr.vcprop.cc.bits.iocqes = 4;

        /* I/O connect with too many existing qpairs */
        memset(&rsp, 0, sizeof(rsp));
        spdk_bit_array_set(ctrlr.qpair_mask, 0);
        spdk_bit_array_set(ctrlr.qpair_mask, 1);
        spdk_bit_array_set(ctrlr.qpair_mask, 2);
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
        CU_ASSERT(qpair.ctrlr == NULL);
        spdk_bit_array_clear(ctrlr.qpair_mask, 0);
        spdk_bit_array_clear(ctrlr.qpair_mask, 1);
        spdk_bit_array_clear(ctrlr.qpair_mask, 2);

        /* I/O connect with duplicate queue ID */
        memset(&rsp, 0, sizeof(rsp));
        memset(&qpair2, 0, sizeof(qpair2));
        qpair2.group = &group;
        qpair2.qid = 1;
        spdk_bit_array_set(ctrlr.qpair_mask, 1);
        cmd.connect_cmd.qid = 1;
        TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
        rc = spdk_nvmf_ctrlr_connect(&req);
        poll_threads();
        CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
        CU_ASSERT(qpair.ctrlr == NULL);

        /* Clean up globals */
        MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
        MOCK_CLEAR(spdk_nvmf_poll_group_create);

        spdk_bit_array_free(&ctrlr.qpair_mask);
}

static void
test_get_ns_id_desc_list(void)
{
        struct spdk_nvmf_subsystem subsystem;
        struct spdk_nvmf_qpair qpair;
        struct spdk_nvmf_ctrlr ctrlr;
        struct spdk_nvmf_request req;
        struct spdk_nvmf_ns *ns_ptrs[1];
        struct spdk_nvmf_ns ns;
        union nvmf_h2c_msg cmd;
        union nvmf_c2h_msg rsp;
        struct spdk_bdev bdev;
        uint8_t buf[4096];

        memset(&subsystem, 0, sizeof(subsystem));
        ns_ptrs[0] = &ns;
        subsystem.ns = ns_ptrs;
        subsystem.max_nsid = 1;
        subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

        memset(&ns, 0, sizeof(ns));
        ns.opts.nsid = 1;
        ns.bdev = &bdev;

        memset(&qpair, 0, sizeof(qpair));
        qpair.ctrlr = &ctrlr;

        memset(&ctrlr, 0, sizeof(ctrlr));
        ctrlr.subsys = &subsystem;
        ctrlr.vcprop.cc.bits.en = 1;

        memset(&req, 0, sizeof(req));
        req.qpair = &qpair;
        req.cmd = &cmd;
        req.rsp = &rsp;
        req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
        req.data = buf;
        req.length = sizeof(buf);

        memset(&cmd, 0, sizeof(cmd));
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
        cmd.nvme_cmd.cdw10 = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
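
        /*
         * Each descriptor in the returned Namespace Identification Descriptor
         * list is a 4-byte header (NIDT, NIDL, two reserved bytes) followed by
         * NIDL bytes of identifier, and the list ends with a zeroed descriptor.
         * That layout is what the buf[] offsets asserted below correspond to:
         * EUI64 at offset 0, NGUID at offset 12, UUID at offset 32 when all
         * three identifiers are present.
         */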

        /* Invalid NSID */
        cmd.nvme_cmd.nsid = 0;
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

        /* Valid NSID, but ns has no IDs defined */
        cmd.nvme_cmd.nsid = 1;
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
        CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

        /* Valid NSID, only EUI64 defined */
        ns.opts.eui64[0] = 0x11;
        ns.opts.eui64[7] = 0xFF;
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
        CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
        CU_ASSERT(buf[1] == 8);
        CU_ASSERT(buf[4] == 0x11);
        CU_ASSERT(buf[11] == 0xFF);
        CU_ASSERT(buf[13] == 0);

        /* Valid NSID, only NGUID defined */
        memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
        ns.opts.nguid[0] = 0x22;
        ns.opts.nguid[15] = 0xEE;
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
        CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
        CU_ASSERT(buf[1] == 16);
        CU_ASSERT(buf[4] == 0x22);
        CU_ASSERT(buf[19] == 0xEE);
        CU_ASSERT(buf[21] == 0);

        /* Valid NSID, both EUI64 and NGUID defined */
        ns.opts.eui64[0] = 0x11;
        ns.opts.eui64[7] = 0xFF;
        ns.opts.nguid[0] = 0x22;
        ns.opts.nguid[15] = 0xEE;
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
        CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
        CU_ASSERT(buf[1] == 8);
        CU_ASSERT(buf[4] == 0x11);
        CU_ASSERT(buf[11] == 0xFF);
        CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
        CU_ASSERT(buf[13] == 16);
        CU_ASSERT(buf[16] == 0x22);
        CU_ASSERT(buf[31] == 0xEE);
        CU_ASSERT(buf[33] == 0);

        /* Valid NSID, EUI64, NGUID, and UUID defined */
        ns.opts.eui64[0] = 0x11;
        ns.opts.eui64[7] = 0xFF;
        ns.opts.nguid[0] = 0x22;
        ns.opts.nguid[15] = 0xEE;
        ns.opts.uuid.u.raw[0] = 0x33;
        ns.opts.uuid.u.raw[15] = 0xDD;
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
        CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
        CU_ASSERT(buf[1] == 8);
        CU_ASSERT(buf[4] == 0x11);
        CU_ASSERT(buf[11] == 0xFF);
        CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
        CU_ASSERT(buf[13] == 16);
        CU_ASSERT(buf[16] == 0x22);
        CU_ASSERT(buf[31] == 0xEE);
        CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
        CU_ASSERT(buf[33] == 16);
        CU_ASSERT(buf[36] == 0x33);
        CU_ASSERT(buf[51] == 0xDD);
        CU_ASSERT(buf[53] == 0);
}
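
/*
 * In test_identify_ns() below, a NULL slot in the namespace array models an
 * allocated-but-inactive NSID: Identify Namespace is expected to succeed with
 * all-zero data for it, while NSIDs beyond max_nsid fail with
 * SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT.
 */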

static void
test_identify_ns(void)
{
        struct spdk_nvmf_subsystem subsystem = {};
        struct spdk_nvmf_transport transport = {};
        struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
        struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
        struct spdk_nvme_cmd cmd = {};
        struct spdk_nvme_cpl rsp = {};
        struct spdk_nvme_ns_data nsdata = {};
        struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
        struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
        struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

        subsystem.ns = ns_arr;
        subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

        /* Invalid NSID 0 */
        cmd.nsid = 0;
        memset(&nsdata, 0, sizeof(nsdata));
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                        &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
        CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

        /* Valid NSID 1 */
        cmd.nsid = 1;
        memset(&nsdata, 0, sizeof(nsdata));
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                        &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
        CU_ASSERT(nsdata.nsze == 1234);

        /* Valid but inactive NSID 2 */
        cmd.nsid = 2;
        memset(&nsdata, 0, sizeof(nsdata));
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                        &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
        CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

        /* Valid NSID 3 */
        cmd.nsid = 3;
        memset(&nsdata, 0, sizeof(nsdata));
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                        &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
        CU_ASSERT(nsdata.nsze == 5678);

        /* Invalid NSID 4 */
        cmd.nsid = 4;
        memset(&nsdata, 0, sizeof(nsdata));
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                        &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
        CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

        /* Invalid NSID 0xFFFFFFFF (NS management not supported) */
        cmd.nsid = 0xFFFFFFFF;
        memset(&nsdata, 0, sizeof(nsdata));
        memset(&rsp, 0, sizeof(rsp));
        CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                        &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
        CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
        CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
        CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *       --------------------------------------
 *      |              NAMESPACE 1             |
 *       --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
        /* Host A has two controllers */
        spdk_uuid_generate(&g_ctrlr1_A.hostid);
        spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

        /* Host B has 1 controller */
        spdk_uuid_generate(&g_ctrlr_B.hostid);

        /* Host C has 1 controller */
        spdk_uuid_generate(&g_ctrlr_C.hostid);

        memset(&g_ns_info, 0, sizeof(g_ns_info));
        g_ns_info.rtype = rtype;
        g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
        g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
        g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

static void
test_reservation_write_exclusive(void)
{
        struct spdk_nvmf_request req = {};
        union nvmf_h2c_msg cmd = {};
        union nvmf_c2h_msg rsp = {};
        int rc;

        req.cmd = &cmd;
        req.rsp = &rsp;

        /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
        ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
        g_ns_info.holder_id = g_ctrlr1_A.hostid;

        /* Test Case: Issue a Read command from Host A and Host B */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);

        /* Test Case: Issue a DSM Write command from Host A and Host B */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
        SPDK_CU_ASSERT_FATAL(rc < 0);
        SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

        /* Test Case: Issue a Write command from Host C */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
        SPDK_CU_ASSERT_FATAL(rc < 0);
        SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

        /* Test Case: Issue a Read command from Host B */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);

        /* Unregister Host C */
        memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

        /* Test Case: Read and Write commands from non-registrant Host C */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
        SPDK_CU_ASSERT_FATAL(rc < 0);
        SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
        struct spdk_nvmf_request req = {};
        union nvmf_h2c_msg cmd = {};
        union nvmf_c2h_msg rsp = {};
        int rc;

        req.cmd = &cmd;
        req.rsp = &rsp;

        /* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
        ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
        g_ns_info.holder_id = g_ctrlr1_A.hostid;

        /* Test Case: Issue a Read command from Host B */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
        SPDK_CU_ASSERT_FATAL(rc < 0);
        SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

        /* Test Case: Issue a Reservation Release command from a valid Registrant */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
        struct spdk_nvmf_request req = {};
        union nvmf_h2c_msg cmd = {};
        union nvmf_c2h_msg rsp = {};
        int rc;

        req.cmd = &cmd;
        req.rsp = &rsp;

        /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
        ut_reservation_init(rtype);
        g_ns_info.holder_id = g_ctrlr1_A.hostid;

        /* Test Case: Issue a Read command from Host A and Host C */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);

        /* Test Case: Issue a DSM Write command from Host A and Host C */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);

        /* Unregister Host C */
        memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

        /* Test Case: Read and Write commands from non-registrant Host C */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
        SPDK_CU_ASSERT_FATAL(rc < 0);
        SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
        _test_reservation_write_exclusive_regs_only_and_all_regs(
                SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
        _test_reservation_write_exclusive_regs_only_and_all_regs(
                SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
        struct spdk_nvmf_request req = {};
        union nvmf_h2c_msg cmd = {};
        union nvmf_c2h_msg rsp = {};
        int rc;

        req.cmd = &cmd;
        req.rsp = &rsp;

        /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
        ut_reservation_init(rtype);
        g_ns_info.holder_id = g_ctrlr1_A.hostid;

        /* Test Case: Issue a Write command from Host B */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
        SPDK_CU_ASSERT_FATAL(rc == 0);

        /* Unregister Host B */
        memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

        /* Test Case: Issue a Read command from Host B */
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
        SPDK_CU_ASSERT_FATAL(rc < 0);
        SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
        cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
        rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
        SPDK_CU_ASSERT_FATAL(rc < 0);
        SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
        _test_reservation_exclusive_access_regs_only_and_all_regs(
                SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
        _test_reservation_exclusive_access_regs_only_and_all_regs(
                SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

int main(int argc, char **argv)
{
        CU_pSuite suite = NULL;
        unsigned int num_failures;

        if (CU_initialize_registry() != CUE_SUCCESS) {
                return CU_get_error();
        }

        suite = CU_add_suite("nvmf", NULL, NULL);
        if (suite == NULL) {
                CU_cleanup_registry();
                return CU_get_error();
        }

        if (
                CU_add_test(suite, "get_log_page", test_get_log_page) == NULL ||
                CU_add_test(suite, "process_fabrics_cmd", test_process_fabrics_cmd) == NULL ||
                CU_add_test(suite, "connect", test_connect) == NULL ||
                CU_add_test(suite, "get_ns_id_desc_list", test_get_ns_id_desc_list) == NULL ||
                CU_add_test(suite, "identify_ns", test_identify_ns) == NULL ||
                CU_add_test(suite, "reservation_write_exclusive", test_reservation_write_exclusive) == NULL ||
                CU_add_test(suite, "reservation_exclusive_access", test_reservation_exclusive_access) == NULL ||
                CU_add_test(suite, "reservation_write_exclusive_regs_only_and_all_regs",
                            test_reservation_write_exclusive_regs_only_and_all_regs) == NULL ||
                CU_add_test(suite, "reservation_exclusive_access_regs_only_and_all_regs",
                            test_reservation_exclusive_access_regs_only_and_all_regs) == NULL
        ) {
                CU_cleanup_registry();
                return CU_get_error();
        }

        allocate_threads(1);
        set_thread(0);

        CU_basic_set_mode(CU_BRM_VERBOSE);
        CU_basic_run_tests();
        num_failures = CU_get_number_of_failures();
        CU_cleanup_registry();

        free_threads();

        return num_failures;
}