1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

/* Include the code under test (and the multi-thread UT harness) as .c files
 * so the tests can reach static functions inside nvmf/ctrlr.c. */
#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

/* Minimal stand-in for the real struct spdk_bdev; only the fields these
 * tests read (blockcnt) are meaningful. */
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

/* Canned serial/model numbers returned by the subsystem getter stubs below. */
const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

/* Stubs for subsystem/target APIs that ctrlr.c calls; individual tests
 * override their return values with MOCK_SET() as needed. */
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);
/* More stubs: discovery log, transport and bdev-ctrlr I/O entry points. */
DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(spdk_nvmf_transport_qpair_set_sqsize,
	    int,
	    (struct spdk_nvmf_qpair *qpair),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(spdk_nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

/* Hand-written stub: tests never need a real disconnect, just success. */
int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

/* Hand-written stub: fills nsdata from the mock bdev's blockcnt with a
 * fixed single 512-byte LBA format, mirroring what the real helper reports. */
void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

/* Exercise spdk_nvmf_ctrlr_get_log_page(): valid request, invalid log ID,
 * non-dword-aligned offset, and missing data buffer. */
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	/* cdw10: log page ID in the low byte, NUMDL (0's based dword count)
	 * in bits 16+ */
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
	CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

/* A fabrics command on a qpair with no controller must fail with
 * COMMAND SEQUENCE ERROR (only CONNECT is legal before a ctrlr exists). */
static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = spdk_nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/* Helper: true iff the completion reports GENERIC/SUCCESS. */
static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC
&& status->sc == SPDK_NVME_SC_SUCCESS;
}

/* Exercise the fabrics CONNECT path: a valid admin connect followed by a
 * long series of error cases. Each error case restores whatever field it
 * perturbed before the next case runs, so ordering here is significant. */
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	/* 3 bits -> matches transport.opts.max_qpairs_per_ctrlr below */
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	/* 0xFFFF = dynamic controller ID */
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	sgroups[subsystem.id].io_outstanding = 5;
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	/* Tear down the controller the connect created */
	spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	/* kato == 0 must not start the keep-alive poller */
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Unterminated subnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.subnqn, 'a', sizeof(connect_data.subnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	/* ipo 256 = byte offset of subnqn in the connect data */
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	/* ipo 512 = byte offset of hostnqn in the connect data */
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	/* ipo 44 = byte offset of sqsize in the connect command */
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	/* ipo 16 = byte offset of cntlid in the connect data */
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	/* Remaining cases connect I/O queues against this existing ctrlr */
	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	/* ipo 42 = byte offset of qid in the connect command */
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);

	/* I/O connect to discovery controller keep-alive-timeout should be 0 */
	cmd.connect_cmd.qid = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = spdk_nvmf_ctrlr_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

/* Exercise the Identify NS ID Descriptor List admin command for every
 * combination of EUI64 / NGUID / UUID being set on the namespace. */
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10 =
SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined.
	 * Each descriptor is a 4-byte header (NIDT, NIDL, reserved) followed
	 * by the ID payload, so the payload starts at buf[4]. */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

/* Exercise spdk_nvmf_ctrlr_identify_ns() against active, inactive (NULL
 * slot), and out-of-range NSIDs. */
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata =
{}; 822 struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}}; 823 struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}}; 824 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 825 826 subsystem.ns = ns_arr; 827 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 828 829 /* Invalid NSID 0 */ 830 cmd.nsid = 0; 831 memset(&nsdata, 0, sizeof(nsdata)); 832 memset(&rsp, 0, sizeof(rsp)); 833 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 834 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 835 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 836 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 837 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 838 839 /* Valid NSID 1 */ 840 cmd.nsid = 1; 841 memset(&nsdata, 0, sizeof(nsdata)); 842 memset(&rsp, 0, sizeof(rsp)); 843 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 844 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 845 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 846 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 847 CU_ASSERT(nsdata.nsze == 1234); 848 849 /* Valid but inactive NSID 2 */ 850 cmd.nsid = 2; 851 memset(&nsdata, 0, sizeof(nsdata)); 852 memset(&rsp, 0, sizeof(rsp)); 853 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 854 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 855 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 856 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 857 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 858 859 /* Valid NSID 3 */ 860 cmd.nsid = 3; 861 memset(&nsdata, 0, sizeof(nsdata)); 862 memset(&rsp, 0, sizeof(rsp)); 863 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 864 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 865 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 866 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 867 CU_ASSERT(nsdata.nsze == 5678); 868 869 /* Invalid NSID 4 */ 870 cmd.nsid = 4; 871 
memset(&nsdata, 0, sizeof(nsdata)); 872 memset(&rsp, 0, sizeof(rsp)); 873 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 874 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 875 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 876 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 877 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 878 879 /* Invalid NSID 0xFFFFFFFF (NS management not supported) */ 880 cmd.nsid = 0xFFFFFFFF; 881 memset(&nsdata, 0, sizeof(nsdata)); 882 memset(&rsp, 0, sizeof(rsp)); 883 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 884 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 885 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 886 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 887 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 888 } 889 890 static void 891 test_set_get_features(void) 892 { 893 struct spdk_nvmf_subsystem subsystem = {}; 894 struct spdk_nvmf_qpair admin_qpair = {}; 895 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 896 union nvmf_h2c_msg cmd = {}; 897 union nvmf_c2h_msg rsp = {}; 898 struct spdk_nvmf_ns ns[3]; 899 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};; 900 struct spdk_nvmf_request req; 901 int rc; 902 903 subsystem.ns = ns_arr; 904 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 905 admin_qpair.ctrlr = &ctrlr; 906 req.qpair = &admin_qpair; 907 cmd.nvme_cmd.nsid = 1; 908 req.cmd = &cmd; 909 req.rsp = &rsp; 910 911 /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 912 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 913 cmd.nvme_cmd.cdw11 = 0x1u; 914 ns[0].ptpl_file = "testcfg"; 915 rc = spdk_nvmf_ctrlr_set_features_reservation_persistence(&req); 916 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 917 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 918 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE); 919 
	/* Even though the status reports "feature ID not saveable", the
	 * namespace's ptpl_activated flag was still switched on.
	 */
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10 = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = spdk_nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	/* cdw0 == 1 mirrors the activated PTPL state */
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
}

/*
 * Reservation Unit Test Configuration
 *
 *  --------    --------    --------
 * | Host A |  | Host B |  | Host C |
 *  --------    --------    --------
 *   /     \       |           |
 * --------  --------    -------    -------
 * |Ctrlr1_A||Ctrlr2_A|  |Ctrlr_B|  |Ctrlr_C|
 * --------  --------    -------    -------
 *    \         \           /          /
 *     \         \         /          /
 *      \         \       /          /
 *      --------------------------------------
 *      |            NAMESPACE 1             |
 *      --------------------------------------
 */

/* Four controllers spread over three hosts, all attached to one namespace */
static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

/*
 * Reset the shared reservation state: give each host a fresh host ID,
 * register hosts A, B and C on the namespace, and set the reservation type.
 */
static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

/*
 * Write Exclusive: only the holder (Host A) may write; reads are allowed
 * for everyone.
 */
static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds
	   reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	/* Reads from a non-holder are permitted under Write Exclusive */
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	/* Non-holder writes are rejected with Reservation Conflict */
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	/* Reads still succeed even from a non-registrant under Write Exclusive */
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

/*
 * Exclusive Access: Host A holds the reservation, so even reads from another
 * registrant conflict; a Reservation Release command still goes through.
 */
static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

/*
 * Shared body for the "registrants only" / "all registrants" variants of the
 * Write Exclusive reservation type, selected via rtype.
 */
static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	/* Any registrant (not just the holder) may write under these types */
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	/* Reads remain allowed for non-registrants; writes conflict */
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

/* Run the shared Write Exclusive registrant checks for both variants */
static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

/*
 * Shared body for the "registrants only" / "all registrants" variants of the
 * Exclusive Access reservation type, selected via rtype.
 */
static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	/* After unregistering, both reads and writes from Host B conflict */
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

/* Run the shared Exclusive Access registrant checks for both variants */
static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

/*
 * Reservation notification log page: masked notification types generate no
 * log entries; unmasked ones queue log pages and complete the pending AER.
 */
static void
test_reservation_notification_log_page(void)
{
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ns ns;
	struct spdk_nvmf_request req;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp = {{0}};
	union spdk_nvme_async_event_completion event = {0};
	struct spdk_nvme_reservation_notification_log logs[3];

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.thread = spdk_get_thread();
	TAILQ_INIT(&ctrlr.log_head);
	ns.nsid = 1;

	/* Test Case: Mask all the reservation notifications */
	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
		  SPDK_NVME_RESERVATION_RELEASED_MASK |
		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_REGISTRATION_PREEMPTED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_RELEASED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	/* All types masked -> nothing was queued */
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));

	/* Test Case: Unmask all the reservation notifications,
	 * 3 log pages are generated, and AER was triggered.
	 */
	ns.mask = 0;
	ctrlr.num_avail_log_pages = 0;
	req.cmd = &cmd;
	req.rsp = &rsp;
	ctrlr.aer_req = &req;
	req.qpair = &qpair;
	TAILQ_INIT(&qpair.outstanding);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);

	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_REGISTRATION_PREEMPTED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_RELEASED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	/* The AER completion (cdw0) must announce the reservation log page */
	event.raw = rsp.nvme_cpl.cdw0;
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);

	/* Test Case: Get Log Page to clear the log pages */
	spdk_nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
}

/*
 * spdk_nvmf_request_get_dif_ctx(): a DIF context is produced only once every
 * precondition below is satisfied (DIF insert/strip enabled, active I/O
 * queue, valid NSID with an attached namespace, block I/O opcode); each
 * intermediate configuration must return false.
 */
static void
test_get_dif_ctx(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *_ns = NULL;
	struct spdk_bdev bdev = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;

	ns.bdev = &bdev;

	/* DIF insert/strip disabled on the controller -> no DIF context */
	ctrlr.dif_insert_or_strip = false;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret ==
		  false);

	/* Qpair must be active */
	ctrlr.dif_insert_or_strip = true;
	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* Fabrics commands do not get a DIF context */
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* I/O queue (qid != 0), but no namespace resolved yet */
	qpair.qid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvme_cmd.nsid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* Attach namespace 1 to the subsystem */
	subsystem.max_nsid = 1;
	subsystem.ns = &_ns;
	subsystem.ns[0] = &ns;

	/* Still false: FLUSH is not a block I/O opcode */
	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == true);
}

/*
 * Identify Controller: verify the NVMe-oF specific IOCCSZ value. In-capsule
 * data is counted for TCP always, and for RDMA only while DIF insert/strip
 * is disabled.
 */
static void
test_identify_ctrlr(void)
{
	struct spdk_nvmf_subsystem subsystem = {
		.subtype = SPDK_NVMF_SUBTYPE_NVME
	};
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_transport transport = {
		.ops = &tops,
		.opts = {
			.in_capsule_data_size = 4096,
		},
	};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_ctrlr_data cdata = {};
	uint32_t expected_ioccsz;

	/* Check ioccsz, TCP transport */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, RDMA transport */
	tops.type = SPDK_NVME_TRANSPORT_RDMA;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, TCP transport with dif_insert_or_strip */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	ctrlr.dif_insert_or_strip = true;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, RDMA transport with dif_insert_or_strip: in-capsule
	 * data is excluded here, only the command size remains.
	 */
	tops.type = SPDK_NVME_TRANSPORT_RDMA;
	ctrlr.dif_insert_or_strip = true;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
}

/* Register all test cases and run them; returns the CUnit failure count */
int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "get_log_page", test_get_log_page) == NULL ||
	    CU_add_test(suite, "process_fabrics_cmd", test_process_fabrics_cmd) == NULL ||
	    CU_add_test(suite, "connect", test_connect) == NULL ||
	    CU_add_test(suite, "get_ns_id_desc_list", test_get_ns_id_desc_list) == NULL ||
	    CU_add_test(suite, "identify_ns", test_identify_ns) == NULL ||
	    CU_add_test(suite, "reservation_write_exclusive",
			test_reservation_write_exclusive) == NULL ||
	    CU_add_test(suite, "reservation_exclusive_access", test_reservation_exclusive_access) == NULL ||
	    CU_add_test(suite, "reservation_write_exclusive_regs_only_and_all_regs",
			test_reservation_write_exclusive_regs_only_and_all_regs) == NULL ||
	    CU_add_test(suite, "reservation_exclusive_access_regs_only_and_all_regs",
			test_reservation_exclusive_access_regs_only_and_all_regs) == NULL ||
	    CU_add_test(suite, "reservation_notification_log_page",
			test_reservation_notification_log_page) == NULL ||
	    CU_add_test(suite, "get_dif_ctx", test_get_dif_ctx) == NULL ||
	    CU_add_test(suite, "set_get_features", test_set_get_features) == NULL ||
	    CU_add_test(suite, "identify_ctrlr", test_identify_ctrlr) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	/* All tests run on a single simulated SPDK thread */
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}