/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";
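
/*
 * Stubs for the controller's dependencies. DEFINE_STUB()/DEFINE_STUB_V()
 * (from spdk_internal/mock.h) emit a mock implementation that returns the
 * given default value; individual tests override a stub's return value with
 * MOCK_SET() and restore the default with MOCK_CLEAR().
 */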

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}
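
/*
 * Get Log Page must succeed for a well-formed request and fail with
 * Invalid Field in Command for an unknown log ID, a non-dword-aligned
 * offset, or a missing data buffer.
 */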
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}
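
/*
 * Walk the Fabrics Connect handler through valid admin connects (with and
 * without keep-alive), a valid I/O queue connect, and every rejection path:
 * bad data length, bad RECFMT, unknown subsystem, unterminated HOSTNQN,
 * disallowed host, out-of-range SQSIZE, bad CNTLID, non-existent controller,
 * discovery-subsystem restrictions, disabled controller, bad IOSQES/IOCQES,
 * exhausted qpair mask, and a duplicate queue ID.
 */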
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);

	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* I/O connect to discovery controller with keep-alive-timeout == 0.
	 * Then, a fixed timeout value is set to keep-alive-timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}
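
/*
 * Identify Namespace Identification Descriptor list: an invalid NSID fails,
 * a namespace with no IDs configured returns an all-zero list, and the
 * EUI64/NGUID/UUID descriptors are emitted in that order with the expected
 * NIDT, NIDL, and payload bytes.
 */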
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}
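
/*
 * Identify Namespace: NSID 0, an NSID above max_nsid, and the broadcast NSID
 * (namespace management is not supported) all fail with Invalid Namespace or
 * Format; an inactive NSID succeeds but returns zeroed data; active
 * namespaces report their bdev's block count in NSZE.
 */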
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}
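
/*
 * Set/Get Features: reservation persistence (PTPL), temperature threshold
 * with valid and reserved TMPSEL/THSEL values, and error recovery with and
 * without DULBE (unsupported here, so setting it fails with Invalid Field).
 */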
static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}
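
/*
 * Write Exclusive: only the holder (Host A) may write; registrants and
 * non-registrants may still read, but a non-holder write fails with
 * Reservation Conflict.
 */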
static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}
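
/*
 * Write Exclusive - Registrants Only / All Registrants: any registrant may
 * write; after Host C unregisters, its reads still succeed but its writes
 * fail with Reservation Conflict.
 */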
static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}
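
/*
 * Reservation notification log: masked event types generate no log pages;
 * once unmasked, three events queue three pages and complete the outstanding
 * AER, and reading the log page drains the queue.
 */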
static void
test_reservation_notification_log_page(void)
{
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ns ns;
	struct spdk_nvmf_request req;
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_reservation_notification_log logs[3];

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.thread = spdk_get_thread();
	TAILQ_INIT(&ctrlr.log_head);
	ns.nsid = 1;

	/* Test Case: Mask all the reservation notifications */
	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
		  SPDK_NVME_RESERVATION_RELEASED_MASK |
		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_REGISTRATION_PREEMPTED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_RELEASED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));

	/* Test Case: Unmask all the reservation notifications,
	 * 3 log pages are generated, and AER was triggered.
	 */
	ns.mask = 0;
	ctrlr.num_avail_log_pages = 0;
	req.cmd = &cmd;
	req.rsp = &rsp;
	ctrlr.aer_req[0] = &req;
	ctrlr.nr_aer_reqs = 1;
	req.qpair = &qpair;
	TAILQ_INIT(&qpair.outstanding);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);

	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_REGISTRATION_PREEMPTED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_RELEASED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	event.raw = rsp.nvme_cpl.cdw0;
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);

	/* Test Case: Get Log Page to clear the log pages */
	nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
}
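
/*
 * spdk_nvmf_request_get_dif_ctx() should return false while any precondition
 * fails (DIF disabled, qpair not active, fabrics command, admin queue,
 * unknown NSID) and true once a DIF-enabled I/O command targets an active
 * namespace.
 */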
static void
test_get_dif_ctx(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *_ns = NULL;
	struct spdk_bdev bdev = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;

	ns.bdev = &bdev;

	ctrlr.dif_insert_or_strip = false;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	ctrlr.dif_insert_or_strip = true;
	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.qid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvme_cmd.nsid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	subsystem.max_nsid = 1;
	subsystem.ns = &_ns;
	subsystem.ns[0] = &ns;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == true);
}

static void
test_identify_ctrlr(void)
{
	struct spdk_nvmf_subsystem subsystem = {
		.subtype = SPDK_NVMF_SUBTYPE_NVME
	};
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_transport transport = {
		.ops = &tops,
		.opts = {
			.in_capsule_data_size = 4096,
		},
	};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_ctrlr_data cdata = {};
	uint32_t expected_ioccsz;

	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);

	/* Check ioccsz, TCP transport */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, RDMA transport */
	tops.type = SPDK_NVME_TRANSPORT_RDMA;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, TCP transport with dif_insert_or_strip */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	ctrlr.dif_insert_or_strip = true;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
}

static int
custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
{
	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static void
test_custom_admin_cmd(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];
	int rc;

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = 0xc1;
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));

	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);

	/* Ensure that our hdlr is being called */
	rc = nvmf_ctrlr_process_admin_cmd(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
}
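
/*
 * Fused Compare and Write: a COMPARE tagged FUSE_FIRST is parked in
 * qpair->first_fused_req until the WRITE tagged FUSE_SECOND arrives.
 * A FUSE_SECOND with no first fails; a WRITE as FUSE_FIRST or a COMPARE as
 * FUSE_SECOND fails with Invalid Opcode and clears any parked request.
 */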
static void
test_fused_compare_and_write(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	struct spdk_bdev bdev = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;

	/* Valid sequence: COMPARE (FUSE_FIRST) followed by WRITE (FUSE_SECOND) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req == NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Wrong sequence - FUSE_SECOND without a preceding FUSE_FIRST */
	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* Write as FUSE_FIRST (wrong op code) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* Compare as FUSE_SECOND (wrong op code) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);
}
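
/*
 * The controller accepts at most NVMF_MAX_ASYNC_EVENTS outstanding AER
 * commands (four, given that the fifth request below is the one expected
 * to fail); any extra request must complete immediately with
 * ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED.
 */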
static void
test_multi_async_event_reqs(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[5] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[5] = {};
	union nvmf_c2h_msg rsp[5] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};

	int i;

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	for (i = 0; i < 5; i++) {
		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
		cmd[i].nvme_cmd.nsid = 1;
		cmd[i].nvme_cmd.cid = i;

		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];
		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
	}

	/* The target can queue up to NVMF_MAX_ASYNC_EVENTS AER requests */
	sgroups.io_outstanding = NVMF_MAX_ASYNC_EVENTS;
	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
	}
	CU_ASSERT(sgroups.io_outstanding == 0);

	/* Exceeding NVMF_MAX_ASYNC_EVENTS must report an error */
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);

	/* Verify that aer_req[] stays packed when a request in the middle is aborted */
	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);

	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
	CU_ASSERT(ctrlr.aer_req[2] == NULL);
	CU_ASSERT(ctrlr.nr_aer_reqs == 2);

	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);
	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}