1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk_cunit.h" 37 #include "spdk_internal/mock.h" 38 39 #include "common/lib/ut_multithread.c" 40 #include "nvmf/ctrlr.c" 41 42 SPDK_LOG_REGISTER_COMPONENT(nvmf) 43 44 struct spdk_bdev { 45 int ut_mock; 46 uint64_t blockcnt; 47 }; 48 49 const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn"; 50 const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn"; 51 52 DEFINE_STUB(spdk_nvmf_tgt_find_subsystem, 53 struct spdk_nvmf_subsystem *, 54 (struct spdk_nvmf_tgt *tgt, const char *subnqn), 55 NULL); 56 57 DEFINE_STUB(spdk_nvmf_poll_group_create, 58 struct spdk_nvmf_poll_group *, 59 (struct spdk_nvmf_tgt *tgt), 60 NULL); 61 62 DEFINE_STUB(spdk_nvmf_subsystem_get_sn, 63 const char *, 64 (const struct spdk_nvmf_subsystem *subsystem), 65 subsystem_default_sn); 66 67 DEFINE_STUB(spdk_nvmf_subsystem_get_mn, 68 const char *, 69 (const struct spdk_nvmf_subsystem *subsystem), 70 subsystem_default_mn); 71 72 DEFINE_STUB(spdk_nvmf_subsystem_host_allowed, 73 bool, 74 (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn), 75 true); 76 77 DEFINE_STUB(nvmf_subsystem_add_ctrlr, 78 int, 79 (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr), 80 0); 81 82 DEFINE_STUB(nvmf_subsystem_get_ctrlr, 83 struct spdk_nvmf_ctrlr *, 84 (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), 85 NULL); 86 87 DEFINE_STUB(nvmf_ctrlr_dsm_supported, 88 bool, 89 (struct spdk_nvmf_ctrlr *ctrlr), 90 false); 91 92 DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported, 93 bool, 94 (struct spdk_nvmf_ctrlr *ctrlr), 95 false); 96 97 DEFINE_STUB_V(nvmf_get_discovery_log_page, 98 (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov, 99 uint32_t iovcnt, uint64_t offset, uint32_t length)); 100 101 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, 102 int, 103 (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 104 0); 105 106 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed, 107 bool, 108 (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid), 109 true); 110 111 DEFINE_STUB(nvmf_subsystem_find_listener, 112 struct spdk_nvmf_subsystem_listener *, 113 (struct spdk_nvmf_subsystem *subsystem, 114 const struct spdk_nvme_transport_id *trid), 115 (void *)0x1); 116 117 DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd, 118 int, 119 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 120 struct spdk_nvmf_request *req), 121 0); 122 123 DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd, 124 int, 125 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 126 struct spdk_nvmf_request *req), 127 0); 128 129 DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd, 130 int, 131 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 132 struct spdk_nvmf_request *req), 133 0); 134 135 DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd, 136 int, 137 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 138 struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req), 139 0); 140 141 DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd, 142 int, 143 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 144 struct spdk_nvmf_request *req), 145 0); 146 147 DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd, 148 int, 149 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 150 struct spdk_nvmf_request *req), 151 0); 152 153 DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd, 154 int, 155 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 156 struct spdk_nvmf_request *req), 157 0); 158 159 DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io, 160 int, 161 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 162 struct spdk_nvmf_request *req), 163 0); 164 165 DEFINE_STUB(nvmf_transport_req_complete, 166 int, 167 (struct spdk_nvmf_request *req), 168 0); 169 170 DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx)); 171 172 DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool, 173 (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, 174 struct spdk_dif_ctx *dif_ctx), 175 true); 176 177 DEFINE_STUB_V(nvmf_transport_qpair_abort_request, 178 (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req)); 179 180 DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd)); 181 DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl)); 182 183 DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem, 184 struct spdk_nvmf_ctrlr *ctrlr)); 185 186 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd, 187 int, 188 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 189 struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort), 190 0); 191 192 DEFINE_STUB(nvmf_transport_req_free, 193 int, 194 (struct spdk_nvmf_request *req), 195 0); 196 197 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin, 198 int, 199 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 200 struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn), 201 0); 202 203 int 204 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx) 205 { 206 return 0; 207 } 208 209 void 210 nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata, 211 bool dif_insert_or_strip) 212 { 213 uint64_t num_blocks; 214 215 SPDK_CU_ASSERT_FATAL(ns->bdev != NULL); 216 num_blocks = ns->bdev->blockcnt; 217 nsdata->nsze = num_blocks; 218 nsdata->ncap = num_blocks; 219 nsdata->nuse = num_blocks; 220 nsdata->nlbaf = 0; 221 nsdata->flbas.format = 0; 222 nsdata->lbaf[0].lbads = spdk_u32log2(512); 223 } 224 225 struct spdk_nvmf_ns * 226 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem) 227 { 228 SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL); 229 return subsystem->ns[0]; 230 } 231 232 struct spdk_nvmf_ns * 233 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, 234 struct spdk_nvmf_ns *prev_ns) 235 { 236 uint32_t nsid; 237 238 SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL); 239 nsid = prev_ns->nsid; 240 241 if (nsid >= subsystem->max_nsid) { 242 return NULL; 243 } 244 for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) { 245 if (subsystem->ns[nsid - 1]) { 246 return subsystem->ns[nsid - 1]; 247 } 248 } 249 return NULL; 250 } 251 252 static void 253 test_get_log_page(void) 254 { 255 struct spdk_nvmf_subsystem subsystem = {}; 256 struct spdk_nvmf_request req = {}; 257 struct spdk_nvmf_qpair qpair = {}; 258 struct spdk_nvmf_ctrlr ctrlr = {}; 259 union nvmf_h2c_msg cmd = {}; 260 union nvmf_c2h_msg rsp = {}; 261 char data[4096]; 262 263 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 264 265 ctrlr.subsys = &subsystem; 266 267 qpair.ctrlr = &ctrlr; 268 269 req.qpair = &qpair; 270 req.cmd = &cmd; 271 req.rsp = &rsp; 272 req.data = &data; 273 req.length = sizeof(data); 274 275 /* Get Log Page - all valid */ 276 memset(&cmd, 0, sizeof(cmd)); 277 memset(&rsp, 0, sizeof(rsp)); 278 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 279 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 280 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 281 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 282 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 283 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 284 285 /* Get Log Page with invalid log ID */ 286 memset(&cmd, 0, sizeof(cmd)); 287 memset(&rsp, 0, sizeof(rsp)); 288 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 289 cmd.nvme_cmd.cdw10 = 0; 290 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 291 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 292 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 293 294 /* Get Log Page with invalid offset (not dword aligned) */ 295 memset(&cmd, 0, sizeof(cmd)); 296 memset(&rsp, 0, sizeof(rsp)); 297 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 298 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 299 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 300 cmd.nvme_cmd.cdw12 = 2; 301 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 302 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 303 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 304 305 /* Get Log Page without data buffer */ 306 memset(&cmd, 0, sizeof(cmd)); 307 memset(&rsp, 0, sizeof(rsp)); 308 req.data = NULL; 309 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 310 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 311 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 312 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 313 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 314 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 315 req.data = data; 316 } 317 318 static void 319 test_process_fabrics_cmd(void) 320 { 321 struct spdk_nvmf_request req = {}; 322 int ret; 323 struct spdk_nvmf_qpair req_qpair = {}; 324 union nvmf_h2c_msg req_cmd = {}; 325 union nvmf_c2h_msg req_rsp = {}; 326 327 req.qpair = &req_qpair; 328 req.cmd = &req_cmd; 329 req.rsp = &req_rsp; 330 req.qpair->ctrlr = NULL; 331 332 /* No ctrlr and invalid command check */ 333 req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET; 334 ret = nvmf_ctrlr_process_fabrics_cmd(&req); 335 CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 336 CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 337 } 338 339 static bool 340 nvme_status_success(const struct spdk_nvme_status *status) 341 { 342 return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS; 343 } 344 345 static void 346 test_connect(void) 347 { 348 struct spdk_nvmf_fabric_connect_data connect_data; 349 struct spdk_nvmf_poll_group group; 350 struct spdk_nvmf_subsystem_poll_group *sgroups; 351 struct spdk_nvmf_transport transport; 352 struct spdk_nvmf_transport_ops tops = {}; 353 struct spdk_nvmf_subsystem subsystem; 354 struct spdk_nvmf_request req; 355 struct spdk_nvmf_qpair admin_qpair; 356 struct spdk_nvmf_qpair qpair; 357 struct spdk_nvmf_qpair qpair2; 358 struct spdk_nvmf_ctrlr ctrlr; 359 struct spdk_nvmf_tgt tgt; 360 union nvmf_h2c_msg cmd; 361 union nvmf_c2h_msg rsp; 362 const uint8_t hostid[16] = { 363 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 364 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 365 }; 366 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 367 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 368 int rc; 369 370 memset(&group, 0, sizeof(group)); 371 group.thread = spdk_get_thread(); 372 373 memset(&ctrlr, 0, sizeof(ctrlr)); 374 ctrlr.subsys = &subsystem; 375 ctrlr.qpair_mask = spdk_bit_array_create(3); 376 SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL); 377 ctrlr.vcprop.cc.bits.en = 1; 378 ctrlr.vcprop.cc.bits.iosqes = 6; 379 ctrlr.vcprop.cc.bits.iocqes = 4; 380 381 memset(&admin_qpair, 0, sizeof(admin_qpair)); 382 admin_qpair.group = &group; 383 admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 384 385 memset(&tgt, 0, sizeof(tgt)); 386 memset(&transport, 0, sizeof(transport)); 387 transport.ops = &tops; 388 transport.opts.max_aq_depth = 32; 389 transport.opts.max_queue_depth = 64; 390 transport.opts.max_qpairs_per_ctrlr = 3; 391 transport.tgt = &tgt; 392 393 memset(&qpair, 0, sizeof(qpair)); 394 qpair.transport = &transport; 395 qpair.group = &group; 396 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 397 TAILQ_INIT(&qpair.outstanding); 398 399 memset(&connect_data, 0, sizeof(connect_data)); 400 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 401 connect_data.cntlid = 0xFFFF; 402 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 403 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 404 405 memset(&subsystem, 0, sizeof(subsystem)); 406 subsystem.thread = spdk_get_thread(); 407 subsystem.id = 1; 408 TAILQ_INIT(&subsystem.ctrlrs); 409 subsystem.tgt = &tgt; 410 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 411 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 412 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 413 414 sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group)); 415 group.sgroups = sgroups; 416 417 memset(&cmd, 0, sizeof(cmd)); 418 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 419 cmd.connect_cmd.cid = 1; 420 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 421 cmd.connect_cmd.recfmt = 0; 422 cmd.connect_cmd.qid = 0; 423 cmd.connect_cmd.sqsize = 31; 424 cmd.connect_cmd.cattr = 0; 425 cmd.connect_cmd.kato = 120000; 426 427 memset(&req, 0, sizeof(req)); 428 req.qpair = &qpair; 429 req.length = sizeof(connect_data); 430 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 431 req.data = &connect_data; 432 req.cmd = &cmd; 433 req.rsp = &rsp; 434 435 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 436 MOCK_SET(spdk_nvmf_poll_group_create, &group); 437 438 /* Valid admin connect command */ 439 memset(&rsp, 0, sizeof(rsp)); 440 sgroups[subsystem.id].mgmt_io_outstanding++; 441 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 442 rc = nvmf_ctrlr_cmd_connect(&req); 443 poll_threads(); 444 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 445 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 446 CU_ASSERT(qpair.ctrlr != NULL); 447 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 448 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 449 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 450 free(qpair.ctrlr); 451 qpair.ctrlr = NULL; 452 453 /* Valid admin connect command with kato = 0 */ 454 cmd.connect_cmd.kato = 0; 455 memset(&rsp, 0, sizeof(rsp)); 456 sgroups[subsystem.id].mgmt_io_outstanding++; 457 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 458 rc = nvmf_ctrlr_cmd_connect(&req); 459 poll_threads(); 460 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 461 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 462 CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL); 463 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 464 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 465 free(qpair.ctrlr); 466 qpair.ctrlr = NULL; 467 cmd.connect_cmd.kato = 120000; 468 469 /* Invalid data length */ 470 memset(&rsp, 0, sizeof(rsp)); 471 req.length = sizeof(connect_data) - 1; 472 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 473 rc = nvmf_ctrlr_cmd_connect(&req); 474 poll_threads(); 475 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 476 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 477 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 478 CU_ASSERT(qpair.ctrlr == NULL); 479 req.length = sizeof(connect_data); 480 481 /* Invalid recfmt */ 482 memset(&rsp, 0, sizeof(rsp)); 483 cmd.connect_cmd.recfmt = 1234; 484 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 485 rc = nvmf_ctrlr_cmd_connect(&req); 486 poll_threads(); 487 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 488 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 489 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT); 490 CU_ASSERT(qpair.ctrlr == NULL); 491 cmd.connect_cmd.recfmt = 0; 492 493 /* Subsystem not found */ 494 memset(&rsp, 0, sizeof(rsp)); 495 MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL); 496 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 497 rc = nvmf_ctrlr_cmd_connect(&req); 498 poll_threads(); 499 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 500 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 501 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 502 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 503 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256); 504 CU_ASSERT(qpair.ctrlr == NULL); 505 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 506 507 /* Unterminated hostnqn */ 508 memset(&rsp, 0, sizeof(rsp)); 509 memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn)); 510 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 511 rc = nvmf_ctrlr_cmd_connect(&req); 512 poll_threads(); 513 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 514 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 515 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 516 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 517 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512); 518 CU_ASSERT(qpair.ctrlr == NULL); 519 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 520 521 /* Host not allowed */ 522 memset(&rsp, 0, sizeof(rsp)); 523 MOCK_SET(spdk_nvmf_subsystem_host_allowed, false); 524 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 525 rc = nvmf_ctrlr_cmd_connect(&req); 526 poll_threads(); 527 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 528 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 529 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST); 530 CU_ASSERT(qpair.ctrlr == NULL); 531 MOCK_SET(spdk_nvmf_subsystem_host_allowed, true); 532 533 /* Invalid sqsize == 0 */ 534 memset(&rsp, 0, sizeof(rsp)); 535 cmd.connect_cmd.sqsize = 0; 536 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 537 rc = nvmf_ctrlr_cmd_connect(&req); 538 poll_threads(); 539 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 540 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 541 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 542 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 543 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 544 CU_ASSERT(qpair.ctrlr == NULL); 545 cmd.connect_cmd.sqsize = 31; 546 547 /* Invalid admin sqsize > max_aq_depth */ 548 memset(&rsp, 0, sizeof(rsp)); 549 cmd.connect_cmd.sqsize = 32; 550 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 551 rc = nvmf_ctrlr_cmd_connect(&req); 552 poll_threads(); 553 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 554 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 555 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 556 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 557 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 558 CU_ASSERT(qpair.ctrlr == NULL); 559 cmd.connect_cmd.sqsize = 31; 560 561 /* Invalid I/O sqsize > max_queue_depth */ 562 memset(&rsp, 0, sizeof(rsp)); 563 cmd.connect_cmd.qid = 1; 564 cmd.connect_cmd.sqsize = 64; 565 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 566 rc = nvmf_ctrlr_cmd_connect(&req); 567 poll_threads(); 568 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 569 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 570 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 571 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 572 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 573 CU_ASSERT(qpair.ctrlr == NULL); 574 cmd.connect_cmd.qid = 0; 575 cmd.connect_cmd.sqsize = 31; 576 577 /* Invalid cntlid for admin queue */ 578 memset(&rsp, 0, sizeof(rsp)); 579 connect_data.cntlid = 0x1234; 580 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 581 rc = nvmf_ctrlr_cmd_connect(&req); 582 poll_threads(); 583 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 584 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 585 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 586 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 587 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 588 CU_ASSERT(qpair.ctrlr == NULL); 589 connect_data.cntlid = 0xFFFF; 590 591 ctrlr.admin_qpair = &admin_qpair; 592 ctrlr.subsys = &subsystem; 593 594 /* Valid I/O queue connect command */ 595 memset(&rsp, 0, sizeof(rsp)); 596 MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr); 597 cmd.connect_cmd.qid = 1; 598 cmd.connect_cmd.sqsize = 63; 599 sgroups[subsystem.id].mgmt_io_outstanding++; 600 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 601 rc = nvmf_ctrlr_cmd_connect(&req); 602 poll_threads(); 603 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 604 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 605 CU_ASSERT(qpair.ctrlr == &ctrlr); 606 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 607 qpair.ctrlr = NULL; 608 cmd.connect_cmd.sqsize = 31; 609 610 /* Non-existent controller */ 611 memset(&rsp, 0, sizeof(rsp)); 612 MOCK_SET(nvmf_subsystem_get_ctrlr, NULL); 613 sgroups[subsystem.id].mgmt_io_outstanding++; 614 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 615 rc = nvmf_ctrlr_cmd_connect(&req); 616 poll_threads(); 617 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 618 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 619 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 620 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 621 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 622 CU_ASSERT(qpair.ctrlr == NULL); 623 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 624 MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr); 625 626 /* I/O connect to discovery controller */ 627 memset(&rsp, 0, sizeof(rsp)); 628 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 629 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 630 sgroups[subsystem.id].mgmt_io_outstanding++; 631 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 632 rc = nvmf_ctrlr_cmd_connect(&req); 633 poll_threads(); 634 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 635 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 636 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 637 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 638 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 639 CU_ASSERT(qpair.ctrlr == NULL); 640 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 641 642 /* I/O connect to discovery controller with keep-alive-timeout != 0 */ 643 cmd.connect_cmd.qid = 0; 644 cmd.connect_cmd.kato = 120000; 645 memset(&rsp, 0, sizeof(rsp)); 646 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 647 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 648 sgroups[subsystem.id].mgmt_io_outstanding++; 649 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 650 rc = nvmf_ctrlr_cmd_connect(&req); 651 poll_threads(); 652 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 653 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 654 CU_ASSERT(qpair.ctrlr != NULL); 655 CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL); 656 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 657 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 658 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 659 free(qpair.ctrlr); 660 qpair.ctrlr = NULL; 661 662 /* I/O connect to discovery controller with keep-alive-timeout == 0. 663 * Then, a fixed timeout value is set to keep-alive-timeout. 664 */ 665 cmd.connect_cmd.kato = 0; 666 memset(&rsp, 0, sizeof(rsp)); 667 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 668 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 669 sgroups[subsystem.id].mgmt_io_outstanding++; 670 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 671 rc = nvmf_ctrlr_cmd_connect(&req); 672 poll_threads(); 673 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 674 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 675 CU_ASSERT(qpair.ctrlr != NULL); 676 CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL); 677 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 678 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 679 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 680 free(qpair.ctrlr); 681 qpair.ctrlr = NULL; 682 cmd.connect_cmd.qid = 1; 683 cmd.connect_cmd.kato = 120000; 684 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 685 686 /* I/O connect to disabled controller */ 687 memset(&rsp, 0, sizeof(rsp)); 688 ctrlr.vcprop.cc.bits.en = 0; 689 sgroups[subsystem.id].mgmt_io_outstanding++; 690 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 691 rc = nvmf_ctrlr_cmd_connect(&req); 692 poll_threads(); 693 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 694 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 695 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 696 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 697 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 698 CU_ASSERT(qpair.ctrlr == NULL); 699 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 700 ctrlr.vcprop.cc.bits.en = 1; 701 702 /* I/O connect with invalid IOSQES */ 703 memset(&rsp, 0, sizeof(rsp)); 704 ctrlr.vcprop.cc.bits.iosqes = 3; 705 sgroups[subsystem.id].mgmt_io_outstanding++; 706 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 707 rc = nvmf_ctrlr_cmd_connect(&req); 708 poll_threads(); 709 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 710 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 711 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 712 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 713 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 714 CU_ASSERT(qpair.ctrlr == NULL); 715 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 716 ctrlr.vcprop.cc.bits.iosqes = 6; 717 718 /* I/O connect with invalid IOCQES */ 719 memset(&rsp, 0, sizeof(rsp)); 720 ctrlr.vcprop.cc.bits.iocqes = 3; 721 sgroups[subsystem.id].mgmt_io_outstanding++; 722 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 723 rc = nvmf_ctrlr_cmd_connect(&req); 724 poll_threads(); 725 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 726 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 727 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 728 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 729 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 730 CU_ASSERT(qpair.ctrlr == NULL); 731 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 732 ctrlr.vcprop.cc.bits.iocqes = 4; 733 734 /* I/O connect with too many existing qpairs */ 735 memset(&rsp, 0, sizeof(rsp)); 736 spdk_bit_array_set(ctrlr.qpair_mask, 0); 737 spdk_bit_array_set(ctrlr.qpair_mask, 1); 738 spdk_bit_array_set(ctrlr.qpair_mask, 2); 739 sgroups[subsystem.id].mgmt_io_outstanding++; 740 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 741 rc = nvmf_ctrlr_cmd_connect(&req); 742 poll_threads(); 743 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 744 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 745 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 746 CU_ASSERT(qpair.ctrlr == NULL); 747 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 748 spdk_bit_array_clear(ctrlr.qpair_mask, 0); 749 spdk_bit_array_clear(ctrlr.qpair_mask, 1); 750 spdk_bit_array_clear(ctrlr.qpair_mask, 2); 751 752 /* I/O connect with duplicate queue ID */ 753 memset(&rsp, 0, sizeof(rsp)); 754 memset(&qpair2, 0, sizeof(qpair2)); 755 qpair2.group = &group; 756 qpair2.qid = 1; 757 spdk_bit_array_set(ctrlr.qpair_mask, 1); 758 cmd.connect_cmd.qid = 1; 759 sgroups[subsystem.id].mgmt_io_outstanding++; 760 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 761 rc = nvmf_ctrlr_cmd_connect(&req); 762 poll_threads(); 763 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 764 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 765 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 766 CU_ASSERT(qpair.ctrlr == NULL); 767 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 768 769 /* I/O connect when admin qpair is being destroyed */ 770 admin_qpair.group = NULL; 771 admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING; 772 memset(&rsp, 0, sizeof(rsp)); 773 sgroups[subsystem.id].mgmt_io_outstanding++; 774 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 775 rc = nvmf_ctrlr_cmd_connect(&req); 776 poll_threads(); 777 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 778 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 779 CU_ASSERT(qpair.ctrlr == NULL); 780 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 781 admin_qpair.group = &group; 782 admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 783 784 /* Clean up globals */ 785 MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem); 786 MOCK_CLEAR(spdk_nvmf_poll_group_create); 787 788 spdk_bit_array_free(&ctrlr.qpair_mask); 789 free(sgroups); 790 } 791 792 static void 793 test_get_ns_id_desc_list(void) 794 { 795 struct spdk_nvmf_subsystem subsystem; 796 struct spdk_nvmf_qpair qpair; 797 struct spdk_nvmf_ctrlr ctrlr; 798 struct spdk_nvmf_request req; 799 struct spdk_nvmf_ns *ns_ptrs[1]; 800 struct spdk_nvmf_ns ns; 801 union nvmf_h2c_msg cmd; 802 union nvmf_c2h_msg rsp; 803 struct spdk_bdev bdev; 804 uint8_t buf[4096]; 805 806 memset(&subsystem, 0, sizeof(subsystem)); 807 ns_ptrs[0] = &ns; 808 subsystem.ns = ns_ptrs; 809 subsystem.max_nsid = 1; 810 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 811 812 memset(&ns, 0, sizeof(ns)); 813 ns.opts.nsid = 1; 814 ns.bdev = &bdev; 815 816 memset(&qpair, 0, sizeof(qpair)); 817 qpair.ctrlr = &ctrlr; 818 819 memset(&ctrlr, 0, sizeof(ctrlr)); 820 ctrlr.subsys = &subsystem; 821 ctrlr.vcprop.cc.bits.en = 1; 822 823 memset(&req, 0, sizeof(req)); 824 req.qpair = &qpair; 825 req.cmd = &cmd; 826 req.rsp = &rsp; 827 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 828 req.data = buf; 829 req.length = sizeof(buf); 830 831 memset(&cmd, 0, sizeof(cmd)); 832 cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY; 833 cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST; 834 835 /* Invalid NSID */ 836 cmd.nvme_cmd.nsid = 0; 837 memset(&rsp, 0, sizeof(rsp)); 838 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 839 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 840 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 841 842 /* Valid NSID, but ns has no IDs defined */ 843 cmd.nvme_cmd.nsid = 1; 844 memset(&rsp, 0, sizeof(rsp)); 845 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 846 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 847 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 848 CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf))); 849 850 /* Valid NSID, only EUI64 defined */ 851 ns.opts.eui64[0] = 0x11; 852 ns.opts.eui64[7] = 0xFF; 853 memset(&rsp, 0, sizeof(rsp)); 854 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 855 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 856 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 857 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 858 CU_ASSERT(buf[1] == 8); 859 CU_ASSERT(buf[4] == 0x11); 860 CU_ASSERT(buf[11] == 0xFF); 861 CU_ASSERT(buf[13] == 0); 862 863 /* Valid NSID, only NGUID defined */ 864 memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64)); 865 ns.opts.nguid[0] = 0x22; 866 ns.opts.nguid[15] = 0xEE; 867 memset(&rsp, 0, sizeof(rsp)); 868 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 869 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 870 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 871 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID); 872 CU_ASSERT(buf[1] == 16); 873 CU_ASSERT(buf[4] == 0x22); 874 CU_ASSERT(buf[19] == 0xEE); 875 CU_ASSERT(buf[21] == 0); 876 877 /* Valid NSID, both EUI64 and NGUID defined */ 878 ns.opts.eui64[0] = 0x11; 879 ns.opts.eui64[7] = 0xFF; 880 ns.opts.nguid[0] = 0x22; 881 ns.opts.nguid[15] = 0xEE; 882 memset(&rsp, 0, sizeof(rsp)); 883 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 884 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 885 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 886 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 887 CU_ASSERT(buf[1] == 8); 888 CU_ASSERT(buf[4] == 0x11); 889 CU_ASSERT(buf[11] == 0xFF); 890 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 891 CU_ASSERT(buf[13] == 16); 892 CU_ASSERT(buf[16] == 0x22); 893 CU_ASSERT(buf[31] == 0xEE); 894 CU_ASSERT(buf[33] == 0); 895 896 /* Valid NSID, EUI64, NGUID, and UUID defined */ 897 ns.opts.eui64[0] = 0x11; 898 ns.opts.eui64[7] = 0xFF; 899 ns.opts.nguid[0] = 0x22; 900 ns.opts.nguid[15] = 0xEE; 901 ns.opts.uuid.u.raw[0] = 0x33; 902 ns.opts.uuid.u.raw[15] = 0xDD; 903 memset(&rsp, 0, sizeof(rsp)); 904 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 905 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 906 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 907 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 908 CU_ASSERT(buf[1] == 8); 909 CU_ASSERT(buf[4] == 0x11); 910 CU_ASSERT(buf[11] == 0xFF); 911 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 912 CU_ASSERT(buf[13] == 16); 913 CU_ASSERT(buf[16] == 0x22); 914 CU_ASSERT(buf[31] == 0xEE); 915 CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID); 916 CU_ASSERT(buf[33] == 16); 917 CU_ASSERT(buf[36] == 0x33); 918 CU_ASSERT(buf[51] == 0xDD); 919 CU_ASSERT(buf[53] == 0); 920 } 921 922 static void 923 test_identify_ns(void) 924 { 925 struct spdk_nvmf_subsystem subsystem = {}; 926 struct spdk_nvmf_transport transport = {}; 927 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 928 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 929 struct spdk_nvme_cmd cmd = {}; 930 struct spdk_nvme_cpl rsp = {}; 931 struct spdk_nvme_ns_data nsdata = {}; 932 struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}}; 933 struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}}; 934 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 935 936 subsystem.ns = ns_arr; 937 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 938 939 /* Invalid NSID 0 */ 940 cmd.nsid = 0; 941 memset(&nsdata, 0, sizeof(nsdata)); 942 memset(&rsp, 0, sizeof(rsp)); 943 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 944 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 945 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 946 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 947 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 948 949 /* Valid NSID 1 */ 950 cmd.nsid = 1; 951 memset(&nsdata, 0, sizeof(nsdata)); 952 memset(&rsp, 0, sizeof(rsp)); 953 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 954 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 955 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 956 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 957 CU_ASSERT(nsdata.nsze == 1234); 958 959 /* Valid but inactive NSID 2 */ 960 cmd.nsid = 2; 961 memset(&nsdata, 0, sizeof(nsdata)); 962 memset(&rsp, 0, sizeof(rsp)); 963 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 964 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 965 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 966 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 967 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 968 969 /* Valid NSID 3 */ 970 cmd.nsid = 3; 971 memset(&nsdata, 0, sizeof(nsdata)); 972 memset(&rsp, 0, sizeof(rsp)); 973 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 974 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 975 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 976 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 977 CU_ASSERT(nsdata.nsze == 5678); 978 979 /* Invalid NSID 4 */ 980 cmd.nsid = 4; 981 memset(&nsdata, 0, sizeof(nsdata)); 982 memset(&rsp, 0, sizeof(rsp)); 983 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 984 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 985 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 986 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 987 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 988 989 /* Invalid NSID 0xFFFFFFFF (NS management not supported) */ 990 cmd.nsid = 0xFFFFFFFF; 991 memset(&nsdata, 0, sizeof(nsdata)); 992 memset(&rsp, 0, sizeof(rsp)); 993 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 994 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 995 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 996 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 997 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 998 } 999 1000 static void 1001 test_set_get_features(void) 1002 { 1003 struct spdk_nvmf_subsystem subsystem = {}; 1004 struct spdk_nvmf_qpair admin_qpair = {}; 1005 struct spdk_nvmf_subsystem_listener listener = {}; 1006 struct spdk_nvmf_ctrlr ctrlr = { 1007 .subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener 1008 }; 1009 union nvmf_h2c_msg cmd = {}; 1010 union nvmf_c2h_msg rsp = {}; 1011 struct spdk_nvmf_ns ns[3]; 1012 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};; 1013 struct spdk_nvmf_request req; 1014 int rc; 1015 1016 subsystem.ns = ns_arr; 1017 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 1018 listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1019 admin_qpair.ctrlr = &ctrlr; 1020 req.qpair = &admin_qpair; 1021 cmd.nvme_cmd.nsid = 1; 1022 req.cmd = &cmd; 1023 req.rsp = &rsp; 1024 1025 /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 1026 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1027 cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1; 1028 ns[0].ptpl_file = "testcfg"; 1029 rc = nvmf_ctrlr_set_features_reservation_persistence(&req); 1030 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1031 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 1032 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE); 1033 CU_ASSERT(ns[0].ptpl_activated == true); 1034 1035 /* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 1036 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1037 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST; 1038 rc = nvmf_ctrlr_get_features_reservation_persistence(&req); 1039 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1040 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1041 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1042 CU_ASSERT(rsp.nvme_cpl.cdw0 == 1); 1043 1044 1045 /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */ 1046 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1047 cmd.nvme_cmd.cdw11 = 0x42; 1048 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1049 1050 rc = nvmf_ctrlr_get_features(&req); 1051 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1052 1053 /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */ 1054 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1055 cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */ 1056 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1057 1058 rc = nvmf_ctrlr_get_features(&req); 1059 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1060 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1061 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1062 1063 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */ 1064 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1065 cmd.nvme_cmd.cdw11 = 0x42; 1066 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1067 1068 rc = nvmf_ctrlr_set_features(&req); 1069 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1070 1071 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */ 1072 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1073 cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */ 1074 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1075 1076 rc = nvmf_ctrlr_set_features(&req); 1077 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1078 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1079 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1080 1081 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */ 1082 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1083 cmd.nvme_cmd.cdw11 = 0x42; 1084 cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */ 1085 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1086 1087 rc = nvmf_ctrlr_set_features(&req); 1088 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1089 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1090 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1091 1092 1093 /* get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */ 1094 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1095 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1096 1097 rc = nvmf_ctrlr_get_features(&req); 1098 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1099 1100 /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */ 1101 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1102 cmd.nvme_cmd.cdw11 = 0x42; 1103 cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1; 1104 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1105 1106 rc = nvmf_ctrlr_set_features(&req); 1107 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1108 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1109 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1110 1111 /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */ 1112 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1113 cmd.nvme_cmd.cdw11 = 0x42; 1114 cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0; 1115 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1116 1117 rc = nvmf_ctrlr_set_features(&req); 1118 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1119 } 1120 1121 /* 1122 * Reservation Unit Test Configuration 1123 * -------- -------- -------- 1124 * | Host A | | Host B | | Host C | 1125 * -------- -------- -------- 1126 * / \ | | 1127 * -------- -------- ------- ------- 1128 * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C| 1129 * -------- -------- ------- ------- 1130 * \ \ / / 1131 * \ \ / / 1132 * \ \ / / 1133 * -------------------------------------- 1134 * | NAMESPACE 1 | 1135 * -------------------------------------- 1136 */ 1137 1138 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C; 1139 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info; 1140 1141 static void 1142 ut_reservation_init(enum spdk_nvme_reservation_type rtype) 1143 { 1144 /* Host A has two controllers */ 1145 spdk_uuid_generate(&g_ctrlr1_A.hostid); 1146 spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid); 1147 1148 /* Host B has 1 controller */ 1149 spdk_uuid_generate(&g_ctrlr_B.hostid); 1150 1151 /* Host C has 1 controller */ 1152 spdk_uuid_generate(&g_ctrlr_C.hostid); 1153 1154 memset(&g_ns_info, 0, sizeof(g_ns_info)); 1155 g_ns_info.rtype = rtype; 1156 g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid; 1157 g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid; 1158 g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid; 1159 } 1160 1161 static void 1162 test_reservation_write_exclusive(void) 1163 { 1164 struct spdk_nvmf_request req = {}; 1165 union nvmf_h2c_msg cmd = {}; 1166 union nvmf_c2h_msg rsp = {}; 1167 int rc; 1168 1169 req.cmd = &cmd; 1170 req.rsp = &rsp; 1171 1172 /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */ 1173 ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE); 1174 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1175 1176 /* Test Case: Issue a Read command from Host A and Host B */ 1177 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1178 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1179 SPDK_CU_ASSERT_FATAL(rc == 0); 1180 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1181 SPDK_CU_ASSERT_FATAL(rc == 0); 1182 1183 /* Test Case: Issue a DSM Write command from Host A and Host B */ 1184 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1185 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1186 SPDK_CU_ASSERT_FATAL(rc == 0); 1187 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1188 SPDK_CU_ASSERT_FATAL(rc < 0); 1189 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1190 1191 /* Test Case: Issue a Write command from Host C */ 1192 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1193 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1194 SPDK_CU_ASSERT_FATAL(rc < 0); 1195 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1196 1197 /* Test Case: Issue a Read command from Host B */ 1198 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1199 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1200 SPDK_CU_ASSERT_FATAL(rc == 0); 1201 1202 /* Unregister Host C */ 1203 memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid)); 1204 1205 /* Test Case: Read and Write commands from non-registrant Host C */ 1206 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1207 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1208 SPDK_CU_ASSERT_FATAL(rc < 0); 1209 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1210 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1211 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1212 SPDK_CU_ASSERT_FATAL(rc == 0); 1213 } 1214 1215 static void 1216 test_reservation_exclusive_access(void) 1217 { 1218 struct spdk_nvmf_request req = {}; 1219 union nvmf_h2c_msg cmd = {}; 1220 union nvmf_c2h_msg rsp = {}; 1221 int rc; 1222 1223 req.cmd = &cmd; 1224 req.rsp = &rsp; 1225 1226 /* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */ 1227 ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS); 1228 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1229 1230 /* Test Case: Issue a Read command from Host B */ 1231 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1232 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1233 SPDK_CU_ASSERT_FATAL(rc < 0); 1234 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1235 1236 /* Test Case: Issue a Reservation Release command from a valid Registrant */ 1237 cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE; 1238 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1239 SPDK_CU_ASSERT_FATAL(rc == 0); 1240 } 1241 1242 static void 1243 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1244 { 1245 struct spdk_nvmf_request req = {}; 1246 union nvmf_h2c_msg cmd = {}; 1247 union nvmf_c2h_msg rsp = {}; 1248 int rc; 1249 1250 req.cmd = &cmd; 1251 req.rsp = &rsp; 1252 1253 /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */ 1254 ut_reservation_init(rtype); 1255 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1256 1257 /* Test Case: Issue a Read command from Host A and Host C */ 1258 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1259 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1260 SPDK_CU_ASSERT_FATAL(rc == 0); 1261 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1262 SPDK_CU_ASSERT_FATAL(rc == 0); 1263 1264 /* Test Case: Issue a DSM Write command from Host A and Host C */ 1265 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1266 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1267 SPDK_CU_ASSERT_FATAL(rc == 0); 1268 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1269 SPDK_CU_ASSERT_FATAL(rc == 0); 1270 1271 /* Unregister Host C */ 1272 memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid)); 1273 1274 /* Test Case: Read and Write commands from non-registrant Host C */ 1275 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1276 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1277 SPDK_CU_ASSERT_FATAL(rc == 0); 1278 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1279 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1280 SPDK_CU_ASSERT_FATAL(rc < 0); 1281 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1282 } 1283 1284 static void 1285 test_reservation_write_exclusive_regs_only_and_all_regs(void) 1286 { 1287 _test_reservation_write_exclusive_regs_only_and_all_regs( 1288 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1289 _test_reservation_write_exclusive_regs_only_and_all_regs( 1290 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS); 1291 } 1292 1293 static void 1294 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1295 { 1296 struct spdk_nvmf_request req = {}; 1297 union nvmf_h2c_msg cmd = {}; 1298 union nvmf_c2h_msg rsp = {}; 1299 int rc; 1300 1301 req.cmd = &cmd; 1302 req.rsp = &rsp; 1303 1304 /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */ 1305 ut_reservation_init(rtype); 1306 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1307 1308 /* Test Case: Issue a Write command from Host B */ 1309 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1310 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1311 SPDK_CU_ASSERT_FATAL(rc == 0); 1312 1313 /* Unregister Host B */ 1314 memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid)); 1315 1316 /* Test Case: Issue a Read command from Host B */ 1317 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1318 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1319 SPDK_CU_ASSERT_FATAL(rc < 0); 1320 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1321 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1322 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1323 SPDK_CU_ASSERT_FATAL(rc < 0); 1324 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1325 } 1326 1327 static void 1328 test_reservation_exclusive_access_regs_only_and_all_regs(void) 1329 { 1330 _test_reservation_exclusive_access_regs_only_and_all_regs( 1331 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY); 1332 _test_reservation_exclusive_access_regs_only_and_all_regs( 1333 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 1334 } 1335 1336 static void 1337 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1338 { 1339 STAILQ_INIT(&ctrlr->async_events); 1340 } 1341 1342 static void 1343 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1344 { 1345 struct spdk_nvmf_async_event_completion *event, *event_tmp; 1346 1347 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) { 1348 STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link); 1349 free(event); 1350 } 1351 } 1352 1353 static int 1354 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1355 { 1356 int num = 0; 1357 struct spdk_nvmf_async_event_completion *event; 1358 1359 STAILQ_FOREACH(event, &ctrlr->async_events, link) { 1360 num++; 1361 } 1362 return num; 1363 } 1364 1365 static void 1366 test_reservation_notification_log_page(void) 1367 { 1368 struct spdk_nvmf_ctrlr ctrlr; 1369 struct spdk_nvmf_qpair qpair; 1370 struct spdk_nvmf_ns ns; 1371 struct spdk_nvmf_request req; 1372 union nvmf_h2c_msg cmd = {}; 1373 union nvmf_c2h_msg rsp = {}; 1374 union spdk_nvme_async_event_completion event = {}; 1375 struct spdk_nvme_reservation_notification_log logs[3]; 1376 1377 memset(&ctrlr, 0, sizeof(ctrlr)); 1378 ctrlr.thread = spdk_get_thread(); 1379 TAILQ_INIT(&ctrlr.log_head); 1380 init_pending_async_events(&ctrlr); 1381 ns.nsid = 1; 1382 1383 /* Test Case: Mask all the reservation notifications */ 1384 ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK | 1385 SPDK_NVME_RESERVATION_RELEASED_MASK | 1386 SPDK_NVME_RESERVATION_PREEMPTED_MASK; 1387 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1388 SPDK_NVME_REGISTRATION_PREEMPTED); 1389 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1390 SPDK_NVME_RESERVATION_RELEASED); 1391 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1392 SPDK_NVME_RESERVATION_PREEMPTED); 1393 poll_threads(); 1394 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head)); 1395 1396 /* Test Case: Unmask all the reservation notifications, 1397 * 3 log pages are generated, and AER was triggered. 1398 */ 1399 ns.mask = 0; 1400 ctrlr.num_avail_log_pages = 0; 1401 req.cmd = &cmd; 1402 req.rsp = &rsp; 1403 ctrlr.aer_req[0] = &req; 1404 ctrlr.nr_aer_reqs = 1; 1405 req.qpair = &qpair; 1406 TAILQ_INIT(&qpair.outstanding); 1407 qpair.ctrlr = NULL; 1408 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1409 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 1410 1411 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1412 SPDK_NVME_REGISTRATION_PREEMPTED); 1413 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1414 SPDK_NVME_RESERVATION_RELEASED); 1415 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1416 SPDK_NVME_RESERVATION_PREEMPTED); 1417 poll_threads(); 1418 event.raw = rsp.nvme_cpl.cdw0; 1419 SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO); 1420 SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL); 1421 SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION); 1422 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3); 1423 1424 /* Test Case: Get Log Page to clear the log pages */ 1425 nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs), 0); 1426 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0); 1427 1428 cleanup_pending_async_events(&ctrlr); 1429 } 1430 1431 static void 1432 test_get_dif_ctx(void) 1433 { 1434 struct spdk_nvmf_subsystem subsystem = {}; 1435 struct spdk_nvmf_request req = {}; 1436 struct spdk_nvmf_qpair qpair = {}; 1437 struct spdk_nvmf_ctrlr ctrlr = {}; 1438 struct spdk_nvmf_ns ns = {}; 1439 struct spdk_nvmf_ns *_ns = NULL; 1440 struct spdk_bdev bdev = {}; 1441 union nvmf_h2c_msg cmd = {}; 1442 struct spdk_dif_ctx dif_ctx = {}; 1443 bool ret; 1444 1445 ctrlr.subsys = &subsystem; 1446 1447 qpair.ctrlr = &ctrlr; 1448 1449 req.qpair = &qpair; 1450 req.cmd = &cmd; 1451 1452 ns.bdev = &bdev; 1453 1454 ctrlr.dif_insert_or_strip = false; 1455 1456 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1457 CU_ASSERT(ret == false); 1458 1459 ctrlr.dif_insert_or_strip = true; 1460 qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED; 1461 1462 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1463 CU_ASSERT(ret == false); 1464 1465 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1466 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 1467 1468 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1469 CU_ASSERT(ret == false); 1470 1471 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH; 1472 1473 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1474 CU_ASSERT(ret == false); 1475 1476 qpair.qid = 1; 1477 1478 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1479 CU_ASSERT(ret == false); 1480 1481 cmd.nvme_cmd.nsid = 1; 1482 1483 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1484 CU_ASSERT(ret == false); 1485 1486 subsystem.max_nsid = 1; 1487 subsystem.ns = &_ns; 1488 subsystem.ns[0] = &ns; 1489 1490 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1491 CU_ASSERT(ret == false); 1492 1493 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE; 1494 1495 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1496 CU_ASSERT(ret == true); 1497 } 1498 1499 static void 1500 test_identify_ctrlr(void) 1501 { 1502 struct spdk_nvmf_subsystem subsystem = { 1503 .subtype = SPDK_NVMF_SUBTYPE_NVME 1504 }; 1505 struct spdk_nvmf_transport_ops tops = {}; 1506 struct spdk_nvmf_transport transport = { 1507 .ops = &tops, 1508 .opts = { 1509 .in_capsule_data_size = 4096, 1510 }, 1511 }; 1512 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 1513 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 1514 struct spdk_nvme_ctrlr_data cdata = {}; 1515 uint32_t expected_ioccsz; 1516 1517 nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata); 1518 1519 /* Check ioccsz, TCP transport */ 1520 tops.type = SPDK_NVME_TRANSPORT_TCP; 1521 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1522 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1523 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1524 1525 /* Check ioccsz, RDMA transport */ 1526 tops.type = SPDK_NVME_TRANSPORT_RDMA; 1527 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1528 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1529 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1530 1531 /* Check ioccsz, TCP transport with dif_insert_or_strip */ 1532 tops.type = SPDK_NVME_TRANSPORT_TCP; 1533 ctrlr.dif_insert_or_strip = true; 1534 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1535 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1536 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1537 } 1538 1539 static int 1540 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req) 1541 { 1542 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 1543 1544 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1545 }; 1546 1547 static void 1548 test_custom_admin_cmd(void) 1549 { 1550 struct spdk_nvmf_subsystem subsystem; 1551 struct spdk_nvmf_qpair qpair; 1552 struct spdk_nvmf_ctrlr ctrlr; 1553 struct spdk_nvmf_request req; 1554 struct spdk_nvmf_ns *ns_ptrs[1]; 1555 struct spdk_nvmf_ns ns; 1556 union nvmf_h2c_msg cmd; 1557 union nvmf_c2h_msg rsp; 1558 struct spdk_bdev bdev; 1559 uint8_t buf[4096]; 1560 int rc; 1561 1562 memset(&subsystem, 0, sizeof(subsystem)); 1563 ns_ptrs[0] = &ns; 1564 subsystem.ns = ns_ptrs; 1565 subsystem.max_nsid = 1; 1566 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1567 1568 memset(&ns, 0, sizeof(ns)); 1569 ns.opts.nsid = 1; 1570 ns.bdev = &bdev; 1571 1572 memset(&qpair, 0, sizeof(qpair)); 1573 qpair.ctrlr = &ctrlr; 1574 1575 memset(&ctrlr, 0, sizeof(ctrlr)); 1576 ctrlr.subsys = &subsystem; 1577 ctrlr.vcprop.cc.bits.en = 1; 1578 1579 memset(&req, 0, sizeof(req)); 1580 req.qpair = &qpair; 1581 req.cmd = &cmd; 1582 req.rsp = &rsp; 1583 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 1584 req.data = buf; 1585 req.length = sizeof(buf); 1586 1587 memset(&cmd, 0, sizeof(cmd)); 1588 cmd.nvme_cmd.opc = 0xc1; 1589 cmd.nvme_cmd.nsid = 0; 1590 memset(&rsp, 0, sizeof(rsp)); 1591 1592 spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr); 1593 1594 /* Ensure that our hdlr is being called */ 1595 rc = nvmf_ctrlr_process_admin_cmd(&req); 1596 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1597 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1598 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1599 } 1600 1601 static void 1602 test_fused_compare_and_write(void) 1603 { 1604 struct spdk_nvmf_request req = {}; 1605 struct spdk_nvmf_qpair qpair = {}; 1606 struct spdk_nvme_cmd cmd = {}; 1607 union nvmf_c2h_msg rsp = {}; 1608 struct spdk_nvmf_ctrlr ctrlr = {}; 1609 struct spdk_nvmf_subsystem subsystem = {}; 1610 struct spdk_nvmf_ns ns = {}; 1611 struct spdk_nvmf_ns *subsys_ns[1] = {}; 1612 struct spdk_nvmf_subsystem_listener listener = {}; 1613 struct spdk_bdev bdev = {}; 1614 1615 struct spdk_nvmf_poll_group group = {}; 1616 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1617 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 1618 struct spdk_io_channel io_ch = {}; 1619 1620 ns.bdev = &bdev; 1621 1622 subsystem.id = 0; 1623 subsystem.max_nsid = 1; 1624 subsys_ns[0] = &ns; 1625 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 1626 1627 listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1628 1629 /* Enable controller */ 1630 ctrlr.vcprop.cc.bits.en = 1; 1631 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 1632 ctrlr.listener = &listener; 1633 1634 group.num_sgroups = 1; 1635 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1636 sgroups.num_ns = 1; 1637 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1638 ns_info.channel = &io_ch; 1639 sgroups.ns_info = &ns_info; 1640 TAILQ_INIT(&sgroups.queued); 1641 group.sgroups = &sgroups; 1642 TAILQ_INIT(&qpair.outstanding); 1643 1644 qpair.ctrlr = &ctrlr; 1645 qpair.group = &group; 1646 qpair.qid = 1; 1647 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1648 1649 cmd.nsid = 1; 1650 1651 req.qpair = &qpair; 1652 req.cmd = (union nvmf_h2c_msg *)&cmd; 1653 req.rsp = &rsp; 1654 1655 /* SUCCESS/SUCCESS */ 1656 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1657 cmd.opc = SPDK_NVME_OPC_COMPARE; 1658 1659 spdk_nvmf_request_exec(&req); 1660 CU_ASSERT(qpair.first_fused_req != NULL); 1661 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1662 1663 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1664 cmd.opc = SPDK_NVME_OPC_WRITE; 1665 1666 spdk_nvmf_request_exec(&req); 1667 CU_ASSERT(qpair.first_fused_req == NULL); 1668 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1669 1670 /* Wrong sequence */ 1671 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1672 cmd.opc = SPDK_NVME_OPC_WRITE; 1673 1674 spdk_nvmf_request_exec(&req); 1675 CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status)); 1676 CU_ASSERT(qpair.first_fused_req == NULL); 1677 1678 /* Write as FUSE_FIRST (Wrong op code) */ 1679 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1680 cmd.opc = SPDK_NVME_OPC_WRITE; 1681 1682 spdk_nvmf_request_exec(&req); 1683 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 1684 CU_ASSERT(qpair.first_fused_req == NULL); 1685 1686 /* Compare as FUSE_SECOND (Wrong op code) */ 1687 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1688 cmd.opc = SPDK_NVME_OPC_COMPARE; 1689 1690 spdk_nvmf_request_exec(&req); 1691 CU_ASSERT(qpair.first_fused_req != NULL); 1692 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1693 1694 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1695 cmd.opc = SPDK_NVME_OPC_COMPARE; 1696 1697 spdk_nvmf_request_exec(&req); 1698 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 1699 CU_ASSERT(qpair.first_fused_req == NULL); 1700 } 1701 1702 static void 1703 test_multi_async_event_reqs(void) 1704 { 1705 struct spdk_nvmf_subsystem subsystem = {}; 1706 struct spdk_nvmf_qpair qpair = {}; 1707 struct spdk_nvmf_ctrlr ctrlr = {}; 1708 struct spdk_nvmf_request req[5] = {}; 1709 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 1710 struct spdk_nvmf_ns ns = {}; 1711 union nvmf_h2c_msg cmd[5] = {}; 1712 union nvmf_c2h_msg rsp[5] = {}; 1713 1714 struct spdk_nvmf_poll_group group = {}; 1715 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1716 1717 int i; 1718 1719 ns_ptrs[0] = &ns; 1720 subsystem.ns = ns_ptrs; 1721 subsystem.max_nsid = 1; 1722 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1723 1724 ns.opts.nsid = 1; 1725 group.sgroups = &sgroups; 1726 1727 qpair.ctrlr = &ctrlr; 1728 qpair.group = &group; 1729 TAILQ_INIT(&qpair.outstanding); 1730 1731 ctrlr.subsys = &subsystem; 1732 ctrlr.vcprop.cc.bits.en = 1; 1733 1734 for (i = 0; i < 5; i++) { 1735 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 1736 cmd[i].nvme_cmd.nsid = 1; 1737 cmd[i].nvme_cmd.cid = i; 1738 1739 req[i].qpair = &qpair; 1740 req[i].cmd = &cmd[i]; 1741 req[i].rsp = &rsp[i]; 1742 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 1743 } 1744 1745 /* Target can store NVMF_MAX_ASYNC_EVENTS reqs */ 1746 sgroups.mgmt_io_outstanding = NVMF_MAX_ASYNC_EVENTS; 1747 for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) { 1748 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 1749 CU_ASSERT(ctrlr.nr_aer_reqs == i + 1); 1750 } 1751 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 1752 1753 /* Exceeding the NVMF_MAX_ASYNC_EVENTS reports error */ 1754 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1755 CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS); 1756 CU_ASSERT(rsp[4].nvme_cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC); 1757 CU_ASSERT(rsp[4].nvme_cpl.status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED); 1758 1759 /* Test if the aer_reqs keep continuous when abort a req in the middle */ 1760 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true); 1761 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 1762 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 1763 CU_ASSERT(ctrlr.aer_req[2] == &req[3]); 1764 1765 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true); 1766 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 1767 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 1768 CU_ASSERT(ctrlr.aer_req[2] == NULL); 1769 CU_ASSERT(ctrlr.nr_aer_reqs == 2); 1770 1771 TAILQ_REMOVE(&qpair.outstanding, &req[0], link); 1772 TAILQ_REMOVE(&qpair.outstanding, &req[1], link); 1773 } 1774 1775 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t)) 1776 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE) 1777 static void 1778 test_get_ana_log_page(void) 1779 { 1780 struct spdk_nvmf_subsystem subsystem = {}; 1781 struct spdk_nvmf_ctrlr ctrlr = {}; 1782 struct spdk_nvmf_subsystem_listener listener = {}; 1783 struct spdk_nvmf_ns ns[3]; 1784 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]}; 1785 uint64_t offset; 1786 uint32_t length; 1787 int i; 1788 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 1789 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 1790 struct spdk_nvme_ana_page *ana_hdr; 1791 char _ana_desc[UT_ANA_DESC_SIZE]; 1792 struct spdk_nvme_ana_group_descriptor *ana_desc; 1793 1794 subsystem.ns = ns_arr; 1795 subsystem.max_nsid = 3; 1796 ctrlr.subsys = &subsystem; 1797 ctrlr.listener = &listener; 1798 listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1799 1800 for (i = 0; i < 3; i++) { 1801 ns_arr[i]->nsid = i + 1; 1802 } 1803 1804 /* create expected page */ 1805 ana_hdr = (void *)&expected_page[0]; 1806 ana_hdr->num_ana_group_desc = 3; 1807 ana_hdr->change_count = 0; 1808 1809 /* descriptor may be unaligned. So create data and then copy it to the location. */ 1810 ana_desc = (void *)_ana_desc; 1811 offset = sizeof(struct spdk_nvme_ana_page); 1812 1813 for (i = 0; i < 3; i++) { 1814 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 1815 ana_desc->ana_group_id = ns_arr[i]->nsid; 1816 ana_desc->num_of_nsid = 1; 1817 ana_desc->change_count = 0; 1818 ana_desc->ana_state = ctrlr.listener->ana_state; 1819 ana_desc->nsid[0] = ns_arr[i]->nsid; 1820 memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE); 1821 offset += UT_ANA_DESC_SIZE; 1822 } 1823 1824 /* read entire actual log page */ 1825 offset = 0; 1826 while (offset < UT_ANA_LOG_PAGE_SIZE) { 1827 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 1828 nvmf_get_ana_log_page(&ctrlr, &actual_page[offset], offset, length, 0); 1829 offset += length; 1830 } 1831 1832 /* compare expected page and actual page */ 1833 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 1834 } 1835 1836 static void 1837 test_multi_async_events(void) 1838 { 1839 struct spdk_nvmf_subsystem subsystem = {}; 1840 struct spdk_nvmf_qpair qpair = {}; 1841 struct spdk_nvmf_ctrlr ctrlr = {}; 1842 struct spdk_nvmf_request req[4] = {}; 1843 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 1844 struct spdk_nvmf_ns ns = {}; 1845 union nvmf_h2c_msg cmd[4] = {}; 1846 union nvmf_c2h_msg rsp[4] = {}; 1847 union spdk_nvme_async_event_completion event = {}; 1848 struct spdk_nvmf_poll_group group = {}; 1849 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1850 int i; 1851 1852 ns_ptrs[0] = &ns; 1853 subsystem.ns = ns_ptrs; 1854 subsystem.max_nsid = 1; 1855 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1856 1857 ns.opts.nsid = 1; 1858 group.sgroups = &sgroups; 1859 1860 qpair.ctrlr = &ctrlr; 1861 qpair.group = &group; 1862 TAILQ_INIT(&qpair.outstanding); 1863 1864 ctrlr.subsys = &subsystem; 1865 ctrlr.vcprop.cc.bits.en = 1; 1866 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 1867 ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1; 1868 ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1; 1869 init_pending_async_events(&ctrlr); 1870 1871 /* Target queue pending events when there is no outstanding AER request */ 1872 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 1873 nvmf_ctrlr_async_event_ana_change_notice(&ctrlr); 1874 nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr); 1875 1876 for (i = 0; i < 4; i++) { 1877 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 1878 cmd[i].nvme_cmd.nsid = 1; 1879 cmd[i].nvme_cmd.cid = i; 1880 1881 req[i].qpair = &qpair; 1882 req[i].cmd = &cmd[i]; 1883 req[i].rsp = &rsp[i]; 1884 1885 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 1886 1887 sgroups.mgmt_io_outstanding = 1; 1888 if (i < 3) { 1889 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1890 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 1891 CU_ASSERT(ctrlr.nr_aer_reqs == 0); 1892 } else { 1893 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 1894 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 1895 CU_ASSERT(ctrlr.nr_aer_reqs == 1); 1896 } 1897 } 1898 1899 event.raw = rsp[0].nvme_cpl.cdw0; 1900 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 1901 event.raw = rsp[1].nvme_cpl.cdw0; 1902 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE); 1903 event.raw = rsp[2].nvme_cpl.cdw0; 1904 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE); 1905 1906 cleanup_pending_async_events(&ctrlr); 1907 } 1908 1909 static void 1910 test_rae(void) 1911 { 1912 struct spdk_nvmf_subsystem subsystem = {}; 1913 struct spdk_nvmf_qpair qpair = {}; 1914 struct spdk_nvmf_ctrlr ctrlr = {}; 1915 struct spdk_nvmf_request req[3] = {}; 1916 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 1917 struct spdk_nvmf_ns ns = {}; 1918 union nvmf_h2c_msg cmd[3] = {}; 1919 union nvmf_c2h_msg rsp[3] = {}; 1920 union spdk_nvme_async_event_completion event = {}; 1921 struct spdk_nvmf_poll_group group = {}; 1922 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1923 int i; 1924 char data[4096]; 1925 1926 ns_ptrs[0] = &ns; 1927 subsystem.ns = ns_ptrs; 1928 subsystem.max_nsid = 1; 1929 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1930 1931 ns.opts.nsid = 1; 1932 group.sgroups = &sgroups; 1933 1934 qpair.ctrlr = &ctrlr; 1935 qpair.group = &group; 1936 TAILQ_INIT(&qpair.outstanding); 1937 1938 ctrlr.subsys = &subsystem; 1939 ctrlr.vcprop.cc.bits.en = 1; 1940 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 1941 init_pending_async_events(&ctrlr); 1942 1943 /* Target queue pending events when there is no outstanding AER request */ 1944 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 1945 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 1946 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 1947 /* only one event will be queued before RAE is clear */ 1948 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 1949 1950 req[0].qpair = &qpair; 1951 req[0].cmd = &cmd[0]; 1952 req[0].rsp = &rsp[0]; 1953 cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 1954 cmd[0].nvme_cmd.nsid = 1; 1955 cmd[0].nvme_cmd.cid = 0; 1956 1957 for (i = 1; i < 3; i++) { 1958 req[i].qpair = &qpair; 1959 req[i].cmd = &cmd[i]; 1960 req[i].rsp = &rsp[i]; 1961 req[i].data = &data; 1962 req[i].length = sizeof(data); 1963 1964 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 1965 cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid = 1966 SPDK_NVME_LOG_CHANGED_NS_LIST; 1967 cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl = 1968 spdk_nvme_bytes_to_numd(req[i].length); 1969 cmd[i].nvme_cmd.cid = i; 1970 } 1971 cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1; 1972 cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0; 1973 1974 /* consume the pending event */ 1975 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link); 1976 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1977 event.raw = rsp[0].nvme_cpl.cdw0; 1978 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 1979 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 1980 1981 /* get log with RAE set */ 1982 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1983 CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1984 CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1985 1986 /* will not generate new event until RAE is clear */ 1987 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 1988 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 1989 1990 /* get log with RAE clear */ 1991 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1992 CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1993 CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1994 1995 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 1996 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 1997 1998 cleanup_pending_async_events(&ctrlr); 1999 } 2000 2001 int main(int argc, char **argv) 2002 { 2003 CU_pSuite suite = NULL; 2004 unsigned int num_failures; 2005 2006 CU_set_error_action(CUEA_ABORT); 2007 CU_initialize_registry(); 2008 2009 suite = CU_add_suite("nvmf", NULL, NULL); 2010 CU_ADD_TEST(suite, test_get_log_page); 2011 CU_ADD_TEST(suite, test_process_fabrics_cmd); 2012 CU_ADD_TEST(suite, test_connect); 2013 CU_ADD_TEST(suite, test_get_ns_id_desc_list); 2014 CU_ADD_TEST(suite, test_identify_ns); 2015 CU_ADD_TEST(suite, test_reservation_write_exclusive); 2016 CU_ADD_TEST(suite, test_reservation_exclusive_access); 2017 CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs); 2018 CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs); 2019 CU_ADD_TEST(suite, test_reservation_notification_log_page); 2020 CU_ADD_TEST(suite, test_get_dif_ctx); 2021 CU_ADD_TEST(suite, test_set_get_features); 2022 CU_ADD_TEST(suite, test_identify_ctrlr); 2023 CU_ADD_TEST(suite, test_custom_admin_cmd); 2024 CU_ADD_TEST(suite, test_fused_compare_and_write); 2025 CU_ADD_TEST(suite, test_multi_async_event_reqs); 2026 CU_ADD_TEST(suite, test_get_ana_log_page); 2027 CU_ADD_TEST(suite, test_multi_async_events); 2028 CU_ADD_TEST(suite, test_rae); 2029 2030 allocate_threads(1); 2031 set_thread(0); 2032 2033 CU_basic_set_mode(CU_BRM_VERBOSE); 2034 CU_basic_run_tests(); 2035 num_failures = CU_get_number_of_failures(); 2036 CU_cleanup_registry(); 2037 2038 free_threads(); 2039 2040 return num_failures; 2041 } 2042