/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

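/*
 * common/lib/ut_multithread.c supplies the allocate_threads()/set_thread()/
 * poll_threads() harness that the tests below use to drive SPDK's
 * asynchronous message passing to completion. The DEFINE_STUB() macros that
 * follow give every external dependency a fixed default return value;
 * individual tests override a stub with MOCK_SET() and restore it with
 * MOCK_CLEAR().
 */
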
SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	struct spdk_nvmf_subsystem *,
	(struct spdk_nvmf_tgt *tgt, const char *subnqn),
	NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	struct spdk_nvmf_poll_group *,
	(struct spdk_nvmf_tgt *tgt),
	NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	const char *,
	(const struct spdk_nvmf_subsystem *subsystem),
	subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	const char *,
	(const struct spdk_nvmf_subsystem *subsystem),
	subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	struct spdk_nvmf_ns *,
	(struct spdk_nvmf_subsystem *subsystem),
	NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	struct spdk_nvmf_ns *,
	(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	bool,
	(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	int,
	(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	struct spdk_nvmf_ctrlr *,
	(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	bool,
	(struct spdk_nvmf_ctrlr *ctrlr),
	false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	bool,
	(struct spdk_nvmf_ctrlr *ctrlr),
	false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	 uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	int,
	(struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	bool,
	(struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	struct spdk_nvmf_subsystem_listener *,
	(struct spdk_nvmf_subsystem *subsystem,
	 const struct spdk_nvme_transport_id *trid),
	(void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_transport_req_complete,
	int,
	(struct spdk_nvmf_request *req),
	0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	 struct spdk_dif_ctx *dif_ctx),
	true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

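/*
 * Minimal local stand-in for nvmf_bdev_ctrlr_identify_ns(); the real
 * implementation (in the bdev-facing part of lib/nvmf) is not compiled into
 * this test. It fills in only the size fields from the mock bdev and
 * advertises a single 512-byte LBA format.
 */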
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

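/*
 * test_connect() drives nvmf_ctrlr_cmd_connect(). Note that SQSIZE in the
 * Fabrics CONNECT command is 0-based, so sqsize == 31 requests a 32-entry
 * queue (matching max_aq_depth below) and sqsize == 32 exceeds it; KATO is
 * expressed in milliseconds.
 */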
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

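	/*
	 * For CONNECT failures with SPDK_NVMF_FABRIC_SC_INVALID_PARAM, the
	 * response encodes which field was bad: iattr == 0 means IPO is a
	 * byte offset into the CONNECT command itself (e.g. 42 == QID,
	 * 44 == SQSIZE), while iattr == 1 means IPO indexes the connect data
	 * (16 == CNTLID, 256 == SUBNQN, 512 == HOSTNQN).
	 */
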
	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

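	/*
	 * The remaining CONNECT cases target I/O queues (qid != 0). Those
	 * require an already-created controller, so nvmf_subsystem_get_ctrlr()
	 * is mocked to return &ctrlr and the controller is given an admin
	 * qpair below.
	 */
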
	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);

	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* I/O connect to discovery controller with keep-alive-timeout == 0.
	 * Then, a fixed timeout value is set to keep-alive-timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

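	/*
	 * qpair_mask was created with 3 bits to match max_qpairs_per_ctrlr;
	 * setting all of them simulates a controller with no free queue IDs.
	 * The duplicate-QID case afterwards sets only bit 1 and connects
	 * again with qid == 1.
	 */
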
	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

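	/*
	 * Each identifier descriptor returned below is laid out as NIDT
	 * (byte 0), NIDL (byte 1), two reserved bytes, then the NIDL-byte
	 * identifier starting at byte 4; the buf[] index checks follow from
	 * that layout, with the list terminated by a zeroed descriptor.
	 */
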
	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

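/*
 * test_identify_ns() covers the full NSID range: active namespaces return
 * filled-in data, an allocated-but-inactive NSID (NULL entry in the ns
 * array) succeeds with an all-zero structure, and out-of-range NSIDs fail
 * with Invalid Namespace or Format.
 */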
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	struct spdk_nvmf_subsystem_listener listener = {};
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

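	/*
	 * In cdw11 of the Temperature Threshold feature, bits 19:16 are
	 * TMPSEL (values 9h-Eh reserved) and bits 21:20 are THSEL (only 0h
	 * and 1h defined). The invalid cases below set TMPSEL = 9h and
	 * THSEL = 3h to hit the reserved-value checks.
	 */
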
	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

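/*
 * In the reservation checks below, nvmf_ns_reservation_request_check()
 * returns 0 when the command is allowed to proceed and a negative value
 * when it must fail with SPDK_NVME_SC_RESERVATION_CONFLICT.
 */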
static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

static void
test_reservation_notification_log_page(void)
{
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ns ns;
	struct spdk_nvmf_request req;
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_reservation_notification_log logs[3];

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.thread = spdk_get_thread();
	TAILQ_INIT(&ctrlr.log_head);
	ns.nsid = 1;

	/* Test Case: Mask all the reservation notifications */
	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
		  SPDK_NVME_RESERVATION_RELEASED_MASK |
		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_REGISTRATION_PREEMPTED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_RELEASED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));

	/* Test Case: Unmask all the reservation notifications,
	 * 3 log pages are generated, and AER was triggered.
	 */
	ns.mask = 0;
	ctrlr.num_avail_log_pages = 0;
	req.cmd = &cmd;
	req.rsp = &rsp;
	ctrlr.aer_req[0] = &req;
	ctrlr.nr_aer_reqs = 1;
	req.qpair = &qpair;
	TAILQ_INIT(&qpair.outstanding);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);

	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_REGISTRATION_PREEMPTED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_RELEASED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	event.raw = rsp.nvme_cpl.cdw0;
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);

	/* Test Case: Get Log Page to clear the log pages */
	nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
}

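/*
 * test_get_dif_ctx() flips one precondition at a time: DIF must be enabled
 * on the controller, the qpair active, the opcode a non-fabrics I/O command
 * on an I/O queue, and the NSID resolvable to a namespace with a bdev.
 * Only the final, fully valid combination returns true.
 */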
static void
test_get_dif_ctx(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *_ns = NULL;
	struct spdk_bdev bdev = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;

	ns.bdev = &bdev;

	ctrlr.dif_insert_or_strip = false;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	ctrlr.dif_insert_or_strip = true;
	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.qid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvme_cmd.nsid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	subsystem.max_nsid = 1;
	subsystem.ns = &_ns;
	subsystem.ns[0] = &ns;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == true);
}

static void
test_identify_ctrlr(void)
{
	struct spdk_nvmf_subsystem subsystem = {
		.subtype = SPDK_NVMF_SUBTYPE_NVME
	};
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_transport transport = {
		.ops = &tops,
		.opts = {
			.in_capsule_data_size = 4096,
		},
	};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_ctrlr_data cdata = {};
	uint32_t expected_ioccsz;

	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);

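	/*
	 * IOCCSZ is reported in 16-byte units: with a 64-byte SQE and the
	 * 4096 bytes of in-capsule data configured above, every case below
	 * expects 64 / 16 + 4096 / 16 == 260.
	 */
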
	/* Check ioccsz, TCP transport */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, RDMA transport */
	tops.type = SPDK_NVME_TRANSPORT_RDMA;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, TCP transport with dif_insert_or_strip */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	ctrlr.dif_insert_or_strip = true;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
}

static int
custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
{
	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static void
test_custom_admin_cmd(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];
	int rc;

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = 0xc1;
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));

	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);

	/* Ensure that our hdlr is being called */
	rc = nvmf_ctrlr_process_admin_cmd(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
}

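/*
 * A fused Compare and Write arrives as two commands: COMPARE with
 * FUSE_FIRST followed by WRITE with FUSE_SECOND. The target parks the
 * first command in qpair->first_fused_req until its partner arrives, so
 * the assertions below track that field across valid and broken sequences.
 */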
static void
test_fused_compare_and_write(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	struct spdk_nvmf_subsystem_listener listener = {};
	struct spdk_bdev bdev = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;

	/* Valid sequence: COMPARE as FUSE_FIRST, then WRITE as FUSE_SECOND */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req == NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Wrong sequence: FUSE_SECOND without a preceding FUSE_FIRST */
	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* WRITE as FUSE_FIRST (wrong opcode) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* COMPARE as FUSE_SECOND (wrong opcode): start with a valid first half */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);
}
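
/*
 * A controller accepts at most NVMF_MAX_ASYNC_EVENTS outstanding Asynchronous
 * Event Requests, held in ctrlr->aer_req[]. One request beyond the limit must
 * complete immediately with ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, and aborting
 * an entry in the middle must compact the array so the remaining entries
 * stay contiguous.
 */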
static void
test_multi_async_event_reqs(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[5] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[5] = {};
	union nvmf_c2h_msg rsp[5] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};

	int i;

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	for (i = 0; i < 5; i++) {
		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
		cmd[i].nvme_cmd.nsid = 1;
		cmd[i].nvme_cmd.cid = i;

		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];
		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
	}

	/* The target can store up to NVMF_MAX_ASYNC_EVENTS AER requests */
	sgroups.io_outstanding = NVMF_MAX_ASYNC_EVENTS;
	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
	}
	CU_ASSERT(sgroups.io_outstanding == 0);

	/* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);

	/* Test that the aer_reqs stay contiguous when a req in the middle is aborted */
	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);

	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
	CU_ASSERT(ctrlr.aer_req[2] == NULL);
	CU_ASSERT(ctrlr.nr_aer_reqs == 2);

	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}