/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk_cunit.h" 37 #include "spdk_internal/mock.h" 38 #include "spdk_internal/thread.h" 39 40 #include "common/lib/ut_multithread.c" 41 #include "nvmf/ctrlr.c" 42 43 SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF) 44 45 struct spdk_bdev { 46 int ut_mock; 47 uint64_t blockcnt; 48 }; 49 50 DEFINE_STUB(spdk_nvmf_tgt_find_subsystem, 51 struct spdk_nvmf_subsystem *, 52 (struct spdk_nvmf_tgt *tgt, const char *subnqn), 53 NULL); 54 55 DEFINE_STUB(spdk_nvmf_poll_group_create, 56 struct spdk_nvmf_poll_group *, 57 (struct spdk_nvmf_tgt *tgt), 58 NULL); 59 60 DEFINE_STUB(spdk_nvmf_subsystem_get_sn, 61 const char *, 62 (const struct spdk_nvmf_subsystem *subsystem), 63 NULL); 64 65 DEFINE_STUB(spdk_nvmf_subsystem_get_mn, 66 const char *, 67 (const struct spdk_nvmf_subsystem *subsystem), 68 NULL); 69 70 DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns, 71 struct spdk_nvmf_ns *, 72 (struct spdk_nvmf_subsystem *subsystem), 73 NULL); 74 75 DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns, 76 struct spdk_nvmf_ns *, 77 (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns), 78 NULL); 79 80 DEFINE_STUB(spdk_nvmf_subsystem_host_allowed, 81 bool, 82 (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn), 83 true); 84 85 DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr, 86 int, 87 (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr), 88 0); 89 90 DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr, 91 struct spdk_nvmf_ctrlr *, 92 (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), 93 NULL); 94 95 DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported, 96 bool, 97 (struct spdk_nvmf_ctrlr *ctrlr), 98 false); 99 100 DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported, 101 bool, 102 (struct spdk_nvmf_ctrlr *ctrlr), 103 false); 104 105 DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page, 106 (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov, 107 uint32_t iovcnt, uint64_t offset, uint32_t length)); 108 109 
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, 110 int, 111 (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 112 0); 113 114 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed, 115 bool, 116 (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvme_transport_id *trid), 117 true); 118 119 DEFINE_STUB(spdk_nvmf_transport_qpair_set_sqsize, 120 int, 121 (struct spdk_nvmf_qpair *qpair), 122 0); 123 124 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_read_cmd, 125 int, 126 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 127 struct spdk_nvmf_request *req), 128 0); 129 130 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_cmd, 131 int, 132 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 133 struct spdk_nvmf_request *req), 134 0); 135 136 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_zeroes_cmd, 137 int, 138 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 139 struct spdk_nvmf_request *req), 140 0); 141 142 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_flush_cmd, 143 int, 144 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 145 struct spdk_nvmf_request *req), 146 0); 147 148 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_dsm_cmd, 149 int, 150 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 151 struct spdk_nvmf_request *req), 152 0); 153 154 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_io, 155 int, 156 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 157 struct spdk_nvmf_request *req), 158 0); 159 160 DEFINE_STUB(spdk_nvmf_transport_req_complete, 161 int, 162 (struct spdk_nvmf_request *req), 163 0); 164 165 DEFINE_STUB_V(spdk_nvmf_ns_reservation_request, (void *ctx)); 166 167 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_get_dif_ctx, bool, 168 (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, 169 struct spdk_dif_ctx *dif_ctx), 170 true); 171 172 int 173 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, 
nvmf_qpair_disconnect_cb cb_fn, void *ctx) 174 { 175 return 0; 176 } 177 178 void 179 spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata, 180 bool dif_insert_or_strip) 181 { 182 uint64_t num_blocks; 183 184 SPDK_CU_ASSERT_FATAL(ns->bdev != NULL); 185 num_blocks = ns->bdev->blockcnt; 186 nsdata->nsze = num_blocks; 187 nsdata->ncap = num_blocks; 188 nsdata->nuse = num_blocks; 189 nsdata->nlbaf = 0; 190 nsdata->flbas.format = 0; 191 nsdata->lbaf[0].lbads = spdk_u32log2(512); 192 } 193 194 static void 195 test_get_log_page(void) 196 { 197 struct spdk_nvmf_subsystem subsystem = {}; 198 struct spdk_nvmf_request req = {}; 199 struct spdk_nvmf_qpair qpair = {}; 200 struct spdk_nvmf_ctrlr ctrlr = {}; 201 union nvmf_h2c_msg cmd = {}; 202 union nvmf_c2h_msg rsp = {}; 203 char data[4096]; 204 205 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 206 207 ctrlr.subsys = &subsystem; 208 209 qpair.ctrlr = &ctrlr; 210 211 req.qpair = &qpair; 212 req.cmd = &cmd; 213 req.rsp = &rsp; 214 req.data = &data; 215 req.length = sizeof(data); 216 217 /* Get Log Page - all valid */ 218 memset(&cmd, 0, sizeof(cmd)); 219 memset(&rsp, 0, sizeof(rsp)); 220 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 221 cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16; 222 CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 223 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 224 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 225 226 /* Get Log Page with invalid log ID */ 227 memset(&cmd, 0, sizeof(cmd)); 228 memset(&rsp, 0, sizeof(rsp)); 229 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 230 cmd.nvme_cmd.cdw10 = 0; 231 CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 232 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 233 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 234 235 /* Get Log Page with invalid 
offset (not dword aligned) */ 236 memset(&cmd, 0, sizeof(cmd)); 237 memset(&rsp, 0, sizeof(rsp)); 238 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 239 cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16; 240 cmd.nvme_cmd.cdw12 = 2; 241 CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 242 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 243 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 244 245 /* Get Log Page without data buffer */ 246 memset(&cmd, 0, sizeof(cmd)); 247 memset(&rsp, 0, sizeof(rsp)); 248 req.data = NULL; 249 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 250 cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16; 251 CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 252 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 253 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 254 req.data = data; 255 } 256 257 static void 258 test_process_fabrics_cmd(void) 259 { 260 struct spdk_nvmf_request req = {}; 261 int ret; 262 struct spdk_nvmf_qpair req_qpair = {}; 263 union nvmf_h2c_msg req_cmd = {}; 264 union nvmf_c2h_msg req_rsp = {}; 265 266 req.qpair = &req_qpair; 267 req.cmd = &req_cmd; 268 req.rsp = &req_rsp; 269 req.qpair->ctrlr = NULL; 270 271 /* No ctrlr and invalid command check */ 272 req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET; 273 ret = spdk_nvmf_ctrlr_process_fabrics_cmd(&req); 274 CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 275 CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 276 } 277 278 static bool 279 nvme_status_success(const struct spdk_nvme_status *status) 280 { 281 return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS; 282 } 283 284 static void 285 test_connect(void) 286 { 287 struct spdk_nvmf_fabric_connect_data connect_data; 288 struct 
spdk_nvmf_poll_group group; 289 struct spdk_nvmf_subsystem_poll_group *sgroups; 290 struct spdk_nvmf_transport transport; 291 struct spdk_nvmf_subsystem subsystem; 292 struct spdk_nvmf_request req; 293 struct spdk_nvmf_qpair admin_qpair; 294 struct spdk_nvmf_qpair qpair; 295 struct spdk_nvmf_qpair qpair2; 296 struct spdk_nvmf_ctrlr ctrlr; 297 struct spdk_nvmf_tgt tgt; 298 union nvmf_h2c_msg cmd; 299 union nvmf_c2h_msg rsp; 300 const uint8_t hostid[16] = { 301 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 302 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 303 }; 304 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 305 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 306 int rc; 307 308 memset(&group, 0, sizeof(group)); 309 group.thread = spdk_get_thread(); 310 311 memset(&ctrlr, 0, sizeof(ctrlr)); 312 ctrlr.subsys = &subsystem; 313 ctrlr.qpair_mask = spdk_bit_array_create(3); 314 SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL); 315 ctrlr.vcprop.cc.bits.en = 1; 316 ctrlr.vcprop.cc.bits.iosqes = 6; 317 ctrlr.vcprop.cc.bits.iocqes = 4; 318 319 memset(&admin_qpair, 0, sizeof(admin_qpair)); 320 admin_qpair.group = &group; 321 322 memset(&tgt, 0, sizeof(tgt)); 323 memset(&transport, 0, sizeof(transport)); 324 transport.opts.max_aq_depth = 32; 325 transport.opts.max_queue_depth = 64; 326 transport.opts.max_qpairs_per_ctrlr = 3; 327 transport.tgt = &tgt; 328 329 memset(&qpair, 0, sizeof(qpair)); 330 qpair.transport = &transport; 331 qpair.group = &group; 332 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 333 TAILQ_INIT(&qpair.outstanding); 334 335 memset(&connect_data, 0, sizeof(connect_data)); 336 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 337 connect_data.cntlid = 0xFFFF; 338 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 339 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 340 341 memset(&subsystem, 0, sizeof(subsystem)); 342 subsystem.thread = spdk_get_thread(); 343 subsystem.id = 1; 344 
TAILQ_INIT(&subsystem.ctrlrs); 345 subsystem.tgt = &tgt; 346 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 347 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 348 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 349 350 sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group)); 351 sgroups[subsystem.id].io_outstanding = 5; 352 group.sgroups = sgroups; 353 354 memset(&cmd, 0, sizeof(cmd)); 355 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 356 cmd.connect_cmd.cid = 1; 357 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 358 cmd.connect_cmd.recfmt = 0; 359 cmd.connect_cmd.qid = 0; 360 cmd.connect_cmd.sqsize = 31; 361 cmd.connect_cmd.cattr = 0; 362 cmd.connect_cmd.kato = 120000; 363 364 memset(&req, 0, sizeof(req)); 365 req.qpair = &qpair; 366 req.length = sizeof(connect_data); 367 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 368 req.data = &connect_data; 369 req.cmd = &cmd; 370 req.rsp = &rsp; 371 372 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 373 MOCK_SET(spdk_nvmf_poll_group_create, &group); 374 375 /* Valid admin connect command */ 376 memset(&rsp, 0, sizeof(rsp)); 377 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 378 rc = spdk_nvmf_ctrlr_connect(&req); 379 poll_threads(); 380 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 381 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 382 CU_ASSERT(qpair.ctrlr != NULL); 383 spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 384 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 385 free(qpair.ctrlr); 386 qpair.ctrlr = NULL; 387 388 /* Valid admin connect command with kato = 0 */ 389 cmd.connect_cmd.kato = 0; 390 memset(&rsp, 0, sizeof(rsp)); 391 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 392 rc = spdk_nvmf_ctrlr_connect(&req); 393 poll_threads(); 394 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 395 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 396 CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == 
NULL); 397 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 398 free(qpair.ctrlr); 399 qpair.ctrlr = NULL; 400 cmd.connect_cmd.kato = 120000; 401 402 /* Invalid data length */ 403 memset(&rsp, 0, sizeof(rsp)); 404 req.length = sizeof(connect_data) - 1; 405 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 406 rc = spdk_nvmf_ctrlr_connect(&req); 407 poll_threads(); 408 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 409 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 410 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 411 CU_ASSERT(qpair.ctrlr == NULL); 412 req.length = sizeof(connect_data); 413 414 /* Invalid recfmt */ 415 memset(&rsp, 0, sizeof(rsp)); 416 cmd.connect_cmd.recfmt = 1234; 417 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 418 rc = spdk_nvmf_ctrlr_connect(&req); 419 poll_threads(); 420 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 421 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 422 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT); 423 CU_ASSERT(qpair.ctrlr == NULL); 424 cmd.connect_cmd.recfmt = 0; 425 426 /* Unterminated subnqn */ 427 memset(&rsp, 0, sizeof(rsp)); 428 memset(connect_data.subnqn, 'a', sizeof(connect_data.subnqn)); 429 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 430 rc = spdk_nvmf_ctrlr_connect(&req); 431 poll_threads(); 432 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 433 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 434 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 435 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 436 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256); 437 CU_ASSERT(qpair.ctrlr == NULL); 438 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 439 440 /* Subsystem not found */ 441 memset(&rsp, 0, sizeof(rsp)); 442 MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL); 443 
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 444 rc = spdk_nvmf_ctrlr_connect(&req); 445 poll_threads(); 446 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 447 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 448 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 449 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 450 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256); 451 CU_ASSERT(qpair.ctrlr == NULL); 452 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 453 454 /* Unterminated hostnqn */ 455 memset(&rsp, 0, sizeof(rsp)); 456 memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn)); 457 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 458 rc = spdk_nvmf_ctrlr_connect(&req); 459 poll_threads(); 460 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 461 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 462 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 463 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 464 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512); 465 CU_ASSERT(qpair.ctrlr == NULL); 466 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 467 468 /* Host not allowed */ 469 memset(&rsp, 0, sizeof(rsp)); 470 MOCK_SET(spdk_nvmf_subsystem_host_allowed, false); 471 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 472 rc = spdk_nvmf_ctrlr_connect(&req); 473 poll_threads(); 474 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 475 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 476 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST); 477 CU_ASSERT(qpair.ctrlr == NULL); 478 MOCK_SET(spdk_nvmf_subsystem_host_allowed, true); 479 480 /* Invalid sqsize == 0 */ 481 memset(&rsp, 0, sizeof(rsp)); 482 cmd.connect_cmd.sqsize = 0; 483 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 484 rc = 
spdk_nvmf_ctrlr_connect(&req); 485 poll_threads(); 486 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 487 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 488 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 489 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 490 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 491 CU_ASSERT(qpair.ctrlr == NULL); 492 cmd.connect_cmd.sqsize = 31; 493 494 /* Invalid admin sqsize > max_aq_depth */ 495 memset(&rsp, 0, sizeof(rsp)); 496 cmd.connect_cmd.sqsize = 32; 497 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 498 rc = spdk_nvmf_ctrlr_connect(&req); 499 poll_threads(); 500 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 501 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 502 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 503 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 504 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 505 CU_ASSERT(qpair.ctrlr == NULL); 506 cmd.connect_cmd.sqsize = 31; 507 508 /* Invalid I/O sqsize > max_queue_depth */ 509 memset(&rsp, 0, sizeof(rsp)); 510 cmd.connect_cmd.qid = 1; 511 cmd.connect_cmd.sqsize = 64; 512 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 513 rc = spdk_nvmf_ctrlr_connect(&req); 514 poll_threads(); 515 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 516 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 517 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 518 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 519 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 520 CU_ASSERT(qpair.ctrlr == NULL); 521 cmd.connect_cmd.qid = 0; 522 cmd.connect_cmd.sqsize = 31; 523 524 /* Invalid cntlid for admin queue */ 525 memset(&rsp, 0, sizeof(rsp)); 526 connect_data.cntlid = 0x1234; 527 TAILQ_INSERT_TAIL(&qpair.outstanding, 
&req, link); 528 rc = spdk_nvmf_ctrlr_connect(&req); 529 poll_threads(); 530 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 531 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 532 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 533 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 534 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 535 CU_ASSERT(qpair.ctrlr == NULL); 536 connect_data.cntlid = 0xFFFF; 537 538 ctrlr.admin_qpair = &admin_qpair; 539 ctrlr.subsys = &subsystem; 540 541 /* Valid I/O queue connect command */ 542 memset(&rsp, 0, sizeof(rsp)); 543 MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr); 544 cmd.connect_cmd.qid = 1; 545 cmd.connect_cmd.sqsize = 63; 546 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 547 rc = spdk_nvmf_ctrlr_connect(&req); 548 poll_threads(); 549 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 550 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 551 CU_ASSERT(qpair.ctrlr == &ctrlr); 552 qpair.ctrlr = NULL; 553 cmd.connect_cmd.sqsize = 31; 554 555 /* Non-existent controller */ 556 memset(&rsp, 0, sizeof(rsp)); 557 MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, NULL); 558 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 559 rc = spdk_nvmf_ctrlr_connect(&req); 560 poll_threads(); 561 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 562 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 563 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 564 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 565 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 566 CU_ASSERT(qpair.ctrlr == NULL); 567 MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr); 568 569 /* I/O connect to discovery controller */ 570 memset(&rsp, 0, sizeof(rsp)); 571 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 572 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 573 
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 574 rc = spdk_nvmf_ctrlr_connect(&req); 575 poll_threads(); 576 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 577 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 578 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 579 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 580 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 581 CU_ASSERT(qpair.ctrlr == NULL); 582 583 /* I/O connect to discovery controller keep-alive-timeout should be 0 */ 584 cmd.connect_cmd.qid = 0; 585 memset(&rsp, 0, sizeof(rsp)); 586 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 587 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 588 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 589 rc = spdk_nvmf_ctrlr_connect(&req); 590 poll_threads(); 591 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 592 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 593 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR); 594 CU_ASSERT(qpair.ctrlr == NULL); 595 cmd.connect_cmd.qid = 1; 596 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 597 598 /* I/O connect to disabled controller */ 599 memset(&rsp, 0, sizeof(rsp)); 600 ctrlr.vcprop.cc.bits.en = 0; 601 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 602 rc = spdk_nvmf_ctrlr_connect(&req); 603 poll_threads(); 604 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 605 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 606 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 607 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 608 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 609 CU_ASSERT(qpair.ctrlr == NULL); 610 ctrlr.vcprop.cc.bits.en = 1; 611 612 /* I/O connect with invalid IOSQES */ 613 memset(&rsp, 0, sizeof(rsp)); 614 ctrlr.vcprop.cc.bits.iosqes = 3; 615 TAILQ_INSERT_TAIL(&qpair.outstanding, 
&req, link); 616 rc = spdk_nvmf_ctrlr_connect(&req); 617 poll_threads(); 618 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 619 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 620 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 621 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 622 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 623 CU_ASSERT(qpair.ctrlr == NULL); 624 ctrlr.vcprop.cc.bits.iosqes = 6; 625 626 /* I/O connect with invalid IOCQES */ 627 memset(&rsp, 0, sizeof(rsp)); 628 ctrlr.vcprop.cc.bits.iocqes = 3; 629 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 630 rc = spdk_nvmf_ctrlr_connect(&req); 631 poll_threads(); 632 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 633 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 634 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 635 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 636 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 637 CU_ASSERT(qpair.ctrlr == NULL); 638 ctrlr.vcprop.cc.bits.iocqes = 4; 639 640 /* I/O connect with too many existing qpairs */ 641 memset(&rsp, 0, sizeof(rsp)); 642 spdk_bit_array_set(ctrlr.qpair_mask, 0); 643 spdk_bit_array_set(ctrlr.qpair_mask, 1); 644 spdk_bit_array_set(ctrlr.qpair_mask, 2); 645 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 646 rc = spdk_nvmf_ctrlr_connect(&req); 647 poll_threads(); 648 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 649 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 650 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 651 CU_ASSERT(qpair.ctrlr == NULL); 652 spdk_bit_array_clear(ctrlr.qpair_mask, 0); 653 spdk_bit_array_clear(ctrlr.qpair_mask, 1); 654 spdk_bit_array_clear(ctrlr.qpair_mask, 2); 655 656 /* I/O connect with duplicate queue ID */ 657 memset(&rsp, 0, sizeof(rsp)); 658 
memset(&qpair2, 0, sizeof(qpair2)); 659 qpair2.group = &group; 660 qpair2.qid = 1; 661 spdk_bit_array_set(ctrlr.qpair_mask, 1); 662 cmd.connect_cmd.qid = 1; 663 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 664 rc = spdk_nvmf_ctrlr_connect(&req); 665 poll_threads(); 666 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 667 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 668 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 669 CU_ASSERT(qpair.ctrlr == NULL); 670 671 /* Clean up globals */ 672 MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem); 673 MOCK_CLEAR(spdk_nvmf_poll_group_create); 674 675 spdk_bit_array_free(&ctrlr.qpair_mask); 676 free(sgroups); 677 } 678 679 static void 680 test_get_ns_id_desc_list(void) 681 { 682 struct spdk_nvmf_subsystem subsystem; 683 struct spdk_nvmf_qpair qpair; 684 struct spdk_nvmf_ctrlr ctrlr; 685 struct spdk_nvmf_request req; 686 struct spdk_nvmf_ns *ns_ptrs[1]; 687 struct spdk_nvmf_ns ns; 688 union nvmf_h2c_msg cmd; 689 union nvmf_c2h_msg rsp; 690 struct spdk_bdev bdev; 691 uint8_t buf[4096]; 692 693 memset(&subsystem, 0, sizeof(subsystem)); 694 ns_ptrs[0] = &ns; 695 subsystem.ns = ns_ptrs; 696 subsystem.max_nsid = 1; 697 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 698 699 memset(&ns, 0, sizeof(ns)); 700 ns.opts.nsid = 1; 701 ns.bdev = &bdev; 702 703 memset(&qpair, 0, sizeof(qpair)); 704 qpair.ctrlr = &ctrlr; 705 706 memset(&ctrlr, 0, sizeof(ctrlr)); 707 ctrlr.subsys = &subsystem; 708 ctrlr.vcprop.cc.bits.en = 1; 709 710 memset(&req, 0, sizeof(req)); 711 req.qpair = &qpair; 712 req.cmd = &cmd; 713 req.rsp = &rsp; 714 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 715 req.data = buf; 716 req.length = sizeof(buf); 717 718 memset(&cmd, 0, sizeof(cmd)); 719 cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY; 720 cmd.nvme_cmd.cdw10 = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST; 721 722 /* Invalid NSID */ 723 cmd.nvme_cmd.nsid = 0; 724 memset(&rsp, 0, sizeof(rsp)); 725 
CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 726 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 727 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 728 729 /* Valid NSID, but ns has no IDs defined */ 730 cmd.nvme_cmd.nsid = 1; 731 memset(&rsp, 0, sizeof(rsp)); 732 CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 733 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 734 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 735 CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf))); 736 737 /* Valid NSID, only EUI64 defined */ 738 ns.opts.eui64[0] = 0x11; 739 ns.opts.eui64[7] = 0xFF; 740 memset(&rsp, 0, sizeof(rsp)); 741 CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 742 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 743 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 744 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 745 CU_ASSERT(buf[1] == 8); 746 CU_ASSERT(buf[4] == 0x11); 747 CU_ASSERT(buf[11] == 0xFF); 748 CU_ASSERT(buf[13] == 0); 749 750 /* Valid NSID, only NGUID defined */ 751 memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64)); 752 ns.opts.nguid[0] = 0x22; 753 ns.opts.nguid[15] = 0xEE; 754 memset(&rsp, 0, sizeof(rsp)); 755 CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 756 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 757 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 758 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID); 759 CU_ASSERT(buf[1] == 16); 760 CU_ASSERT(buf[4] == 0x22); 761 CU_ASSERT(buf[19] == 0xEE); 762 CU_ASSERT(buf[21] == 0); 763 764 /* Valid NSID, both EUI64 and NGUID defined */ 765 ns.opts.eui64[0] = 0x11; 766 ns.opts.eui64[7] = 0xFF; 767 ns.opts.nguid[0] = 0x22; 768 ns.opts.nguid[15] = 0xEE; 769 memset(&rsp, 0, sizeof(rsp)); 770 
CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 771 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 772 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 773 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 774 CU_ASSERT(buf[1] == 8); 775 CU_ASSERT(buf[4] == 0x11); 776 CU_ASSERT(buf[11] == 0xFF); 777 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 778 CU_ASSERT(buf[13] == 16); 779 CU_ASSERT(buf[16] == 0x22); 780 CU_ASSERT(buf[31] == 0xEE); 781 CU_ASSERT(buf[33] == 0); 782 783 /* Valid NSID, EUI64, NGUID, and UUID defined */ 784 ns.opts.eui64[0] = 0x11; 785 ns.opts.eui64[7] = 0xFF; 786 ns.opts.nguid[0] = 0x22; 787 ns.opts.nguid[15] = 0xEE; 788 ns.opts.uuid.u.raw[0] = 0x33; 789 ns.opts.uuid.u.raw[15] = 0xDD; 790 memset(&rsp, 0, sizeof(rsp)); 791 CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 792 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 793 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 794 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 795 CU_ASSERT(buf[1] == 8); 796 CU_ASSERT(buf[4] == 0x11); 797 CU_ASSERT(buf[11] == 0xFF); 798 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 799 CU_ASSERT(buf[13] == 16); 800 CU_ASSERT(buf[16] == 0x22); 801 CU_ASSERT(buf[31] == 0xEE); 802 CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID); 803 CU_ASSERT(buf[33] == 16); 804 CU_ASSERT(buf[36] == 0x33); 805 CU_ASSERT(buf[51] == 0xDD); 806 CU_ASSERT(buf[53] == 0); 807 } 808 809 static void 810 test_identify_ns(void) 811 { 812 struct spdk_nvmf_subsystem subsystem = {}; 813 struct spdk_nvmf_transport transport = {}; 814 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 815 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 816 struct spdk_nvme_cmd cmd = {}; 817 struct spdk_nvme_cpl rsp = {}; 818 struct spdk_nvme_ns_data nsdata = {}; 819 struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}}; 
820 struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}}; 821 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 822 823 subsystem.ns = ns_arr; 824 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 825 826 /* Invalid NSID 0 */ 827 cmd.nsid = 0; 828 memset(&nsdata, 0, sizeof(nsdata)); 829 memset(&rsp, 0, sizeof(rsp)); 830 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 831 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 832 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 833 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 834 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 835 836 /* Valid NSID 1 */ 837 cmd.nsid = 1; 838 memset(&nsdata, 0, sizeof(nsdata)); 839 memset(&rsp, 0, sizeof(rsp)); 840 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 841 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 842 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 843 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 844 CU_ASSERT(nsdata.nsze == 1234); 845 846 /* Valid but inactive NSID 2 */ 847 cmd.nsid = 2; 848 memset(&nsdata, 0, sizeof(nsdata)); 849 memset(&rsp, 0, sizeof(rsp)); 850 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 851 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 852 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 853 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 854 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 855 856 /* Valid NSID 3 */ 857 cmd.nsid = 3; 858 memset(&nsdata, 0, sizeof(nsdata)); 859 memset(&rsp, 0, sizeof(rsp)); 860 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 861 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 862 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 863 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 864 CU_ASSERT(nsdata.nsze == 5678); 865 866 /* Invalid NSID 4 */ 867 cmd.nsid = 4; 868 memset(&nsdata, 0, sizeof(nsdata)); 869 memset(&rsp, 0, sizeof(rsp)); 870 
CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 871 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 872 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 873 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 874 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 875 876 /* Invalid NSID 0xFFFFFFFF (NS management not supported) */ 877 cmd.nsid = 0xFFFFFFFF; 878 memset(&nsdata, 0, sizeof(nsdata)); 879 memset(&rsp, 0, sizeof(rsp)); 880 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 881 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 882 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 883 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 884 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 885 } 886 887 static void 888 test_set_get_features(void) 889 { 890 struct spdk_nvmf_subsystem subsystem = {}; 891 struct spdk_nvmf_qpair admin_qpair = {}; 892 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 893 union nvmf_h2c_msg cmd = {}; 894 union nvmf_c2h_msg rsp = {}; 895 struct spdk_nvmf_ns ns[3]; 896 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};; 897 struct spdk_nvmf_request req; 898 int rc; 899 900 subsystem.ns = ns_arr; 901 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 902 admin_qpair.ctrlr = &ctrlr; 903 req.qpair = &admin_qpair; 904 cmd.nvme_cmd.nsid = 1; 905 req.cmd = &cmd; 906 req.rsp = &rsp; 907 908 /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 909 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 910 cmd.nvme_cmd.cdw11 = 0x1u; 911 ns[0].ptpl_file = "testcfg"; 912 rc = spdk_nvmf_ctrlr_set_features_reservation_persistence(&req); 913 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 914 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 915 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE); 916 CU_ASSERT(ns[0].ptpl_activated == true); 917 918 /* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST 
feature */ 919 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 920 cmd.nvme_cmd.cdw10 = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST; 921 rc = spdk_nvmf_ctrlr_get_features_reservation_persistence(&req); 922 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 923 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 924 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 925 CU_ASSERT(rsp.nvme_cpl.cdw0 == 1); 926 } 927 928 /* 929 * Reservation Unit Test Configuration 930 * -------- -------- -------- 931 * | Host A | | Host B | | Host C | 932 * -------- -------- -------- 933 * / \ | | 934 * -------- -------- ------- ------- 935 * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C| 936 * -------- -------- ------- ------- 937 * \ \ / / 938 * \ \ / / 939 * \ \ / / 940 * -------------------------------------- 941 * | NAMESPACE 1 | 942 * -------------------------------------- 943 */ 944 945 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C; 946 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info; 947 948 static void 949 ut_reservation_init(enum spdk_nvme_reservation_type rtype) 950 { 951 /* Host A has two controllers */ 952 spdk_uuid_generate(&g_ctrlr1_A.hostid); 953 spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid); 954 955 /* Host B has 1 controller */ 956 spdk_uuid_generate(&g_ctrlr_B.hostid); 957 958 /* Host C has 1 controller */ 959 spdk_uuid_generate(&g_ctrlr_C.hostid); 960 961 memset(&g_ns_info, 0, sizeof(g_ns_info)); 962 g_ns_info.rtype = rtype; 963 g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid; 964 g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid; 965 g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid; 966 } 967 968 static void 969 test_reservation_write_exclusive(void) 970 { 971 struct spdk_nvmf_request req = {}; 972 union nvmf_h2c_msg cmd = {}; 973 union nvmf_c2h_msg rsp = {}; 974 int rc; 975 976 req.cmd = &cmd; 977 req.rsp = &rsp; 978 979 /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */ 980 
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B.
	 * Only the reservation holder may issue write-type commands. */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C.
	 * Under Write Exclusive, reads are allowed even for non-registrants. */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

/* Exclusive Access: only the holder may read or write, but reservation
 * commands from registrants are still permitted. */
static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

/* Write Exclusive - Registrants Only / All Registrants: every registrant may
 * write; once unregistered, a host loses write access but keeps read access. */
static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

/* Run the registrants-only/all-registrants write-exclusive scenario for both
 * reservation type codes. */
static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

/* Exclusive Access - Registrants Only / All Registrants: registrants may
 * read and write; after unregistering, Host B loses both. */
static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

/* Run the registrants-only/all-registrants exclusive-access scenario for both
 * reservation type codes. */
static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

/* Verify reservation notification log pages: masked notice types produce no
 * log entries; unmasked ones queue log pages and fire the AER. */
static void
test_reservation_notification_log_page(void)
{
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ns ns;
	struct spdk_nvmf_request req;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp = {{0}};
	union spdk_nvme_async_event_completion event = {0};
	struct spdk_nvme_reservation_notification_log logs[3];

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.thread = spdk_get_thread();
	TAILQ_INIT(&ctrlr.log_head);
	ns.nsid = 1;

	/* Test Case: Mask all the reservation notifications */
	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
		  SPDK_NVME_RESERVATION_RELEASED_MASK |
		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_REGISTRATION_PREEMPTED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_RELEASED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	/* All notice types were masked, so nothing may be queued */
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));

	/* Test Case: Unmask all the reservation notifications,
	 * 3 log pages are generated, and AER was triggered.
	 */
	ns.mask = 0;
	ctrlr.num_avail_log_pages = 0;
	req.cmd = &cmd;
	req.rsp = &rsp;
	/* Park an outstanding AER request so the notice can complete it */
	ctrlr.aer_req = &req;
	req.qpair = &qpair;
	TAILQ_INIT(&qpair.outstanding);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);

	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_REGISTRATION_PREEMPTED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_RELEASED);
	spdk_nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					       SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	/* The AER completion carries the async event in cdw0 */
	event.raw = rsp.nvme_cpl.cdw0;
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);

	/* Test Case: Get Log Page to clear the log pages */
	spdk_nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
}

/* Walk spdk_nvmf_request_get_dif_ctx() through every early-out condition
 * (DIF disabled, qpair not active, fabrics/flush opcode, admin queue, no
 * nsid, no namespace) before finally getting a DIF context for a WRITE. */
static void
test_get_dif_ctx(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *_ns = NULL;
	struct spdk_bdev bdev = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;

	ns.bdev = &bdev;

	/* DIF insert/strip disabled on the controller */
	ctrlr.dif_insert_or_strip = false;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	ctrlr.dif_insert_or_strip = true;
	/* Qpair not yet active */
	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	/* Fabrics commands carry no DIF context */
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* Switch to an I/O queue; still no nsid set */
	qpair.qid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvme_cmd.nsid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* Attach the namespace to the subsystem */
	subsystem.max_nsid = 1;
	subsystem.ns = &_ns;
	subsystem.ns[0] = &ns;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* WRITE on an active I/O qpair with a valid namespace succeeds */
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == true);
}

/* Register all test cases with CUnit and run them on a single UT thread.
 * Returns the number of failed assertions as the process exit code. */
int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "get_log_page", test_get_log_page) == NULL ||
		CU_add_test(suite, "process_fabrics_cmd", test_process_fabrics_cmd) == NULL ||
		CU_add_test(suite, "connect", test_connect) == NULL ||
		CU_add_test(suite, "get_ns_id_desc_list", test_get_ns_id_desc_list) == NULL ||
		CU_add_test(suite, "identify_ns", test_identify_ns) == NULL ||
		CU_add_test(suite, "reservation_write_exclusive", test_reservation_write_exclusive) == NULL ||
		CU_add_test(suite, "reservation_exclusive_access", test_reservation_exclusive_access) == NULL ||
		CU_add_test(suite, "reservation_write_exclusive_regs_only_and_all_regs",
			    test_reservation_write_exclusive_regs_only_and_all_regs) == NULL ||
		CU_add_test(suite, "reservation_exclusive_access_regs_only_and_all_regs",
			    test_reservation_exclusive_access_regs_only_and_all_regs) == NULL ||
		CU_add_test(suite, "reservation_notification_log_page",
			    test_reservation_notification_log_page) == NULL ||
		CU_add_test(suite, "get_dif_ctx", test_get_dif_ctx) == NULL ||
		CU_add_test(suite, "set_get_features",
			    test_set_get_features) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	/* Single UT thread backs poll_threads() used by the reservation tests */
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}