/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";
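/*
 * Stub out the dependencies that ctrlr.c pulls in from the subsystem,
 * bdev, and transport layers so the controller logic can be linked and
 * tested in isolation.  DEFINE_STUB() generates a mock with the given
 * return type, signature, and default return value; individual tests
 * override the defaults with MOCK_SET()/MOCK_CLEAR().
 */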
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(spdk_nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}
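/*
 * spdk_nvmf_ctrlr_get_log_page(): a well-formed Error log request
 * succeeds, while an invalid log ID, a non-dword-aligned offset, and a
 * missing data buffer each complete with Invalid Field in Command.
 */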
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}
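/*
 * A non-CONNECT fabrics command (PROPERTY_GET here) on a qpair that has
 * no controller yet must fail with Command Sequence Error.
 */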
static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = spdk_nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}
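/*
 * Walk the Fabrics CONNECT path: valid admin connects (with and without
 * keep-alive), then each rejection case - bad data length, bad record
 * format, unknown subsystem, malformed or disallowed host, out-of-range
 * SQSIZE, bad CNTLID, I/O connect to discovery or disabled controllers,
 * invalid IOSQES/IOCQES, qpair mask exhaustion, and duplicate queue IDs.
 */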
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	sgroups[subsystem.id].io_outstanding = 5;
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;
	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;
	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);

	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* I/O connect to discovery controller with keep-alive-timeout == 0.
	 * Then, a fixed timeout value is set to keep-alive-timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.iosqes = 6;
	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}
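/*
 * Identify with CNS 03h (Namespace Identification Descriptor list):
 * descriptors are emitted in EUI64/NGUID/UUID order, each with the
 * correct NIDT and NIDL in its 4-byte header, and the remainder of the
 * buffer stays zeroed.
 */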
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
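/* (continued in test_identify_ns below)
 * Identify Namespace against a three-slot namespace array: active
 * namespaces report the bdev block count in NSZE, an inactive slot
 * returns zeroed data with SUCCESS, and NSID 0, an out-of-range NSID,
 * and the 0xFFFFFFFF broadcast NSID (no NS management support) are all
 * rejected as Invalid Namespace or Format.
 */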
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}
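/*
 * Set/Get Features for Host Reservation Persistence: setting PTPL on a
 * namespace backed by a ptpl_file activates persistence (even though the
 * completion reports Feature Identifier Not Saveable), and Get Features
 * then reports the activated state in CDW0.
 */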
static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = spdk_nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = spdk_nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
}
/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}
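/*
 * With the registrants-only and all-registrants write-exclusive types,
 * every registered host may both read and write; once Host C unregisters
 * it can still read, but its writes hit a reservation conflict.
 */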
static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}
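/*
 * Reservation notification log pages: masked notification types must not
 * queue any log page; once unmasked, three events queue three pages and
 * complete the pending AER with the reservation log page identifier, and
 * reading the log page drains the queue.
 */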
static void
test_reservation_notification_log_page(void)
{
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ns ns;
	struct spdk_nvmf_request req;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp = {{0}};
	union spdk_nvme_async_event_completion event = {0};
	struct spdk_nvme_reservation_notification_log logs[3];

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.thread = spdk_get_thread();
	TAILQ_INIT(&ctrlr.log_head);
	ns.nsid = 1;

	/* Test Case: Mask all the reservation notifications */
	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
		  SPDK_NVME_RESERVATION_RELEASED_MASK |
		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_REGISTRATION_PREEMPTED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_RELEASED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));

	/* Test Case: Unmask all the reservation notifications,
	 * 3 log pages are generated, and AER was triggered.
	 */
	ns.mask = 0;
	ctrlr.num_avail_log_pages = 0;
	req.cmd = &cmd;
	req.rsp = &rsp;
	ctrlr.aer_req = &req;
	req.qpair = &qpair;
	TAILQ_INIT(&qpair.outstanding);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);

	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_REGISTRATION_PREEMPTED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_RELEASED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	event.raw = rsp.nvme_cpl.cdw0;
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);

	/* Test Case: Get Log Page to clear the log pages */
	spdk_nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
}
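/*
 * spdk_nvmf_request_get_dif_ctx() must refuse to build a DIF context
 * until every precondition holds: DIF insert/strip enabled, an active
 * qpair, a non-fabrics opcode on an I/O queue, and a resolvable
 * namespace; only the final WRITE command yields a context.
 */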
static void
test_get_dif_ctx(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *_ns = NULL;
	struct spdk_bdev bdev = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;

	ns.bdev = &bdev;

	ctrlr.dif_insert_or_strip = false;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	ctrlr.dif_insert_or_strip = true;
	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.qid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvme_cmd.nsid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	subsystem.max_nsid = 1;
	subsystem.ns = &_ns;
	subsystem.ns[0] = &ns;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == true);
}
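/*
 * IOCCSZ is reported in 16-byte units: the 64-byte SQE plus the
 * transport's in-capsule data size.  The exception is RDMA with DIF
 * insert/strip enabled, where in-capsule data is not used and only the
 * SQE contributes.
 */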
static void
test_identify_ctrlr(void)
{
	struct spdk_nvmf_subsystem subsystem = {
		.subtype = SPDK_NVMF_SUBTYPE_NVME
	};
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_transport transport = {
		.ops = &tops,
		.opts = {
			.in_capsule_data_size = 4096,
		},
	};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_ctrlr_data cdata = {};
	uint32_t expected_ioccsz;

	/* Check ioccsz, TCP transport */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, RDMA transport */
	tops.type = SPDK_NVME_TRANSPORT_RDMA;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, TCP transport with dif_insert_or_strip */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	ctrlr.dif_insert_or_strip = true;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, RDMA transport with dif_insert_or_strip */
	tops.type = SPDK_NVME_TRANSPORT_RDMA;
	ctrlr.dif_insert_or_strip = true;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
}

static int
custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
{
	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static void
test_custom_admin_cmd(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];
	int rc;

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = 0xc1;
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));

	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);

	/* Ensure that our hdlr is being called */
	rc = spdk_nvmf_ctrlr_process_admin_cmd(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
}
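/*
 * Fused COMPARE + WRITE: a proper FIRST/SECOND pair is accepted (the
 * compare is parked in qpair.first_fused_req until the write arrives); a
 * SECOND with no pending FIRST fails, and a WRITE marked FIRST or a
 * COMPARE marked SECOND completes with Invalid Opcode and clears any
 * pending fused request.
 */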
static void
test_fused_compare_and_write(void)
{
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvme_cmd cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_ns ns;
	struct spdk_nvmf_ns *subsys_ns[1];
	struct spdk_bdev bdev;

	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group sgroups;
	struct spdk_nvmf_subsystem_pg_ns_info ns_info;

	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;

	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;

	/* SUCCESS/SUCCESS */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req == NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Wrong sequence */
	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* Write as FUSE_FIRST (Wrong op code) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* Compare as FUSE_SECOND (Wrong op code) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "get_log_page", test_get_log_page) == NULL ||
	    CU_add_test(suite, "process_fabrics_cmd", test_process_fabrics_cmd) == NULL ||
	    CU_add_test(suite, "connect", test_connect) == NULL ||
	    CU_add_test(suite, "get_ns_id_desc_list", test_get_ns_id_desc_list) == NULL ||
	    CU_add_test(suite, "identify_ns", test_identify_ns) == NULL ||
	    CU_add_test(suite, "reservation_write_exclusive", test_reservation_write_exclusive) == NULL ||
	    CU_add_test(suite, "reservation_exclusive_access", test_reservation_exclusive_access) == NULL ||
	    CU_add_test(suite, "reservation_write_exclusive_regs_only_and_all_regs",
			test_reservation_write_exclusive_regs_only_and_all_regs) == NULL ||
	    CU_add_test(suite, "reservation_exclusive_access_regs_only_and_all_regs",
			test_reservation_exclusive_access_regs_only_and_all_regs) == NULL ||
	    CU_add_test(suite, "reservation_notification_log_page",
			test_reservation_notification_log_page) == NULL ||
	    CU_add_test(suite, "get_dif_ctx", test_get_dif_ctx) == NULL ||
	    CU_add_test(suite, "set_get_features", test_set_get_features) == NULL ||
	    CU_add_test(suite, "identify_ctrlr", test_identify_ctrlr) == NULL ||
	    CU_add_test(suite, "custom_admin_cmd", test_custom_admin_cmd) == NULL ||
	    CU_add_test(suite, "fused_compare_and_write", test_fused_compare_and_write) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}