1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk_cunit.h" 37 #include "spdk_internal/mock.h" 38 #include "spdk_internal/thread.h" 39 40 #include "common/lib/ut_multithread.c" 41 #include "nvmf/ctrlr.c" 42 43 SPDK_LOG_REGISTER_COMPONENT(nvmf) 44 45 struct spdk_bdev { 46 int ut_mock; 47 uint64_t blockcnt; 48 }; 49 50 const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn"; 51 const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn"; 52 53 DEFINE_STUB(spdk_nvmf_tgt_find_subsystem, 54 struct spdk_nvmf_subsystem *, 55 (struct spdk_nvmf_tgt *tgt, const char *subnqn), 56 NULL); 57 58 DEFINE_STUB(spdk_nvmf_poll_group_create, 59 struct spdk_nvmf_poll_group *, 60 (struct spdk_nvmf_tgt *tgt), 61 NULL); 62 63 DEFINE_STUB(spdk_nvmf_subsystem_get_sn, 64 const char *, 65 (const struct spdk_nvmf_subsystem *subsystem), 66 subsystem_default_sn); 67 68 DEFINE_STUB(spdk_nvmf_subsystem_get_mn, 69 const char *, 70 (const struct spdk_nvmf_subsystem *subsystem), 71 subsystem_default_mn); 72 73 DEFINE_STUB(spdk_nvmf_subsystem_host_allowed, 74 bool, 75 (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn), 76 true); 77 78 DEFINE_STUB(nvmf_subsystem_add_ctrlr, 79 int, 80 (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr), 81 0); 82 83 DEFINE_STUB(nvmf_subsystem_get_ctrlr, 84 struct spdk_nvmf_ctrlr *, 85 (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), 86 NULL); 87 88 DEFINE_STUB(nvmf_ctrlr_dsm_supported, 89 bool, 90 (struct spdk_nvmf_ctrlr *ctrlr), 91 false); 92 93 DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported, 94 bool, 95 (struct spdk_nvmf_ctrlr *ctrlr), 96 false); 97 98 DEFINE_STUB_V(nvmf_get_discovery_log_page, 99 (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov, 100 uint32_t iovcnt, uint64_t offset, uint32_t length)); 101 102 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, 103 int, 104 (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 105 0); 106 107 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed, 108 bool, 109 (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid), 110 true); 111 112 DEFINE_STUB(nvmf_subsystem_find_listener, 113 struct spdk_nvmf_subsystem_listener *, 114 (struct spdk_nvmf_subsystem *subsystem, 115 const struct spdk_nvme_transport_id *trid), 116 (void *)0x1); 117 118 DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd, 119 int, 120 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 121 struct spdk_nvmf_request *req), 122 0); 123 124 DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd, 125 int, 126 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 127 struct spdk_nvmf_request *req), 128 0); 129 130 DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd, 131 int, 132 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 133 struct spdk_nvmf_request *req), 134 0); 135 136 DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd, 137 int, 138 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 139 struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req), 140 0); 141 142 DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd, 143 int, 144 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 145 struct spdk_nvmf_request *req), 146 0); 147 148 DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd, 149 int, 150 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 151 struct spdk_nvmf_request *req), 152 0); 153 154 DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd, 155 int, 156 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 157 struct spdk_nvmf_request *req), 158 0); 159 160 DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io, 161 int, 162 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 163 struct spdk_nvmf_request *req), 164 0); 165 166 DEFINE_STUB(nvmf_transport_req_complete, 167 int, 168 (struct spdk_nvmf_request *req), 169 0); 170 171 DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx)); 172 173 DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool, 174 (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, 175 struct spdk_dif_ctx *dif_ctx), 176 true); 177 178 DEFINE_STUB_V(nvmf_transport_qpair_abort_request, 179 (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req)); 180 181 DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd)); 182 DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl)); 183 184 int 185 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx) 186 { 187 return 0; 188 } 189 190 void 191 nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata, 192 bool dif_insert_or_strip) 193 { 194 uint64_t num_blocks; 195 196 SPDK_CU_ASSERT_FATAL(ns->bdev != NULL); 197 num_blocks = ns->bdev->blockcnt; 198 nsdata->nsze = num_blocks; 199 nsdata->ncap = num_blocks; 200 nsdata->nuse = num_blocks; 201 nsdata->nlbaf = 0; 202 nsdata->flbas.format = 0; 203 nsdata->lbaf[0].lbads = spdk_u32log2(512); 204 } 205 206 struct spdk_nvmf_ns * 207 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem) 208 { 209 SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL); 210 return subsystem->ns[0]; 211 } 212 213 struct spdk_nvmf_ns * 214 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, 215 struct spdk_nvmf_ns *prev_ns) 216 { 217 uint32_t nsid; 218 219 SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL); 220 nsid = prev_ns->nsid; 221 222 if (nsid >= subsystem->max_nsid) { 223 return NULL; 224 } 225 for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) { 226 if (subsystem->ns[nsid - 1]) { 227 return subsystem->ns[nsid - 1]; 228 } 229 } 230 return NULL; 231 } 232 233 static void 234 test_get_log_page(void) 235 { 236 struct spdk_nvmf_subsystem subsystem = {}; 237 struct spdk_nvmf_request req = {}; 238 struct spdk_nvmf_qpair qpair = {}; 239 struct spdk_nvmf_ctrlr ctrlr = {}; 240 union nvmf_h2c_msg cmd = {}; 241 union nvmf_c2h_msg rsp = {}; 242 char data[4096]; 243 244 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 245 246 ctrlr.subsys = &subsystem; 247 248 qpair.ctrlr = &ctrlr; 249 250 req.qpair = &qpair; 251 req.cmd = &cmd; 252 req.rsp = &rsp; 253 req.data = &data; 254 req.length = sizeof(data); 255 256 /* Get Log Page - all valid */ 257 memset(&cmd, 0, sizeof(cmd)); 258 memset(&rsp, 0, sizeof(rsp)); 259 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 260 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 261 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1); 262 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 263 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 264 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 265 266 /* Get Log Page with invalid log ID */ 267 memset(&cmd, 0, sizeof(cmd)); 268 memset(&rsp, 0, sizeof(rsp)); 269 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 270 cmd.nvme_cmd.cdw10 = 0; 271 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 272 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 273 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 274 275 /* Get Log Page with invalid offset (not dword aligned) */ 276 memset(&cmd, 0, sizeof(cmd)); 277 memset(&rsp, 0, sizeof(rsp)); 278 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 279 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 280 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1); 281 cmd.nvme_cmd.cdw12 = 2; 282 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 283 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 284 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 285 286 /* Get Log Page without data buffer */ 287 memset(&cmd, 0, sizeof(cmd)); 288 memset(&rsp, 0, sizeof(rsp)); 289 req.data = NULL; 290 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 291 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 292 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1); 293 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 294 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 295 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 296 req.data = data; 297 } 298 299 static void 300 test_process_fabrics_cmd(void) 301 { 302 struct spdk_nvmf_request req = {}; 303 int ret; 304 struct spdk_nvmf_qpair req_qpair = {}; 305 union nvmf_h2c_msg req_cmd = {}; 306 union nvmf_c2h_msg req_rsp = {}; 307 308 req.qpair = &req_qpair; 309 req.cmd = &req_cmd; 310 req.rsp = &req_rsp; 311 req.qpair->ctrlr = NULL; 312 313 /* No ctrlr and invalid command check */ 314 req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET; 315 ret = nvmf_ctrlr_process_fabrics_cmd(&req); 316 CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 317 CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 318 } 319 320 static bool 321 nvme_status_success(const struct spdk_nvme_status *status) 322 { 323 return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS; 324 } 325 326 static void 327 test_connect(void) 328 { 329 struct spdk_nvmf_fabric_connect_data connect_data; 330 struct spdk_nvmf_poll_group group; 331 struct spdk_nvmf_subsystem_poll_group *sgroups; 332 struct spdk_nvmf_transport transport; 333 struct spdk_nvmf_transport_ops tops = {}; 334 struct spdk_nvmf_subsystem subsystem; 335 struct spdk_nvmf_request req; 336 struct spdk_nvmf_qpair admin_qpair; 337 struct spdk_nvmf_qpair qpair; 338 struct spdk_nvmf_qpair qpair2; 339 struct spdk_nvmf_ctrlr ctrlr; 340 struct spdk_nvmf_tgt tgt; 341 union nvmf_h2c_msg cmd; 342 union nvmf_c2h_msg rsp; 343 const uint8_t hostid[16] = { 344 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 345 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 346 }; 347 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 348 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 349 int rc; 350 351 memset(&group, 0, sizeof(group)); 352 group.thread = spdk_get_thread(); 353 354 memset(&ctrlr, 0, sizeof(ctrlr)); 355 ctrlr.subsys = &subsystem; 356 ctrlr.qpair_mask = spdk_bit_array_create(3); 357 SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL); 358 ctrlr.vcprop.cc.bits.en = 1; 359 ctrlr.vcprop.cc.bits.iosqes = 6; 360 ctrlr.vcprop.cc.bits.iocqes = 4; 361 362 memset(&admin_qpair, 0, sizeof(admin_qpair)); 363 admin_qpair.group = &group; 364 365 memset(&tgt, 0, sizeof(tgt)); 366 memset(&transport, 0, sizeof(transport)); 367 transport.ops = &tops; 368 transport.opts.max_aq_depth = 32; 369 transport.opts.max_queue_depth = 64; 370 transport.opts.max_qpairs_per_ctrlr = 3; 371 transport.tgt = &tgt; 372 373 memset(&qpair, 0, sizeof(qpair)); 374 qpair.transport = &transport; 375 qpair.group = &group; 376 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 377 TAILQ_INIT(&qpair.outstanding); 378 379 memset(&connect_data, 0, sizeof(connect_data)); 380 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 381 connect_data.cntlid = 0xFFFF; 382 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 383 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 384 385 memset(&subsystem, 0, sizeof(subsystem)); 386 subsystem.thread = spdk_get_thread(); 387 subsystem.id = 1; 388 TAILQ_INIT(&subsystem.ctrlrs); 389 subsystem.tgt = &tgt; 390 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 391 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 392 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 393 394 sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group)); 395 group.sgroups = sgroups; 396 397 memset(&cmd, 0, sizeof(cmd)); 398 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 399 cmd.connect_cmd.cid = 1; 400 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 401 cmd.connect_cmd.recfmt = 0; 402 cmd.connect_cmd.qid = 0; 403 cmd.connect_cmd.sqsize = 31; 404 cmd.connect_cmd.cattr = 0; 405 cmd.connect_cmd.kato = 120000; 406 407 memset(&req, 0, sizeof(req)); 408 req.qpair = &qpair; 409 req.length = sizeof(connect_data); 410 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 411 req.data = &connect_data; 412 req.cmd = &cmd; 413 req.rsp = &rsp; 414 415 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 416 MOCK_SET(spdk_nvmf_poll_group_create, &group); 417 418 /* Valid admin connect command */ 419 memset(&rsp, 0, sizeof(rsp)); 420 sgroups[subsystem.id].io_outstanding++; 421 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 422 rc = nvmf_ctrlr_cmd_connect(&req); 423 poll_threads(); 424 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 425 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 426 CU_ASSERT(qpair.ctrlr != NULL); 427 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 428 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 429 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 430 free(qpair.ctrlr); 431 qpair.ctrlr = NULL; 432 433 /* Valid admin connect command with kato = 0 */ 434 cmd.connect_cmd.kato = 0; 435 memset(&rsp, 0, sizeof(rsp)); 436 sgroups[subsystem.id].io_outstanding++; 437 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 438 rc = nvmf_ctrlr_cmd_connect(&req); 439 poll_threads(); 440 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 441 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 442 CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL); 443 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 444 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 445 free(qpair.ctrlr); 446 qpair.ctrlr = NULL; 447 cmd.connect_cmd.kato = 120000; 448 449 /* Invalid data length */ 450 memset(&rsp, 0, sizeof(rsp)); 451 req.length = sizeof(connect_data) - 1; 452 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 453 rc = nvmf_ctrlr_cmd_connect(&req); 454 poll_threads(); 455 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 456 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 457 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 458 CU_ASSERT(qpair.ctrlr == NULL); 459 req.length = sizeof(connect_data); 460 461 /* Invalid recfmt */ 462 memset(&rsp, 0, sizeof(rsp)); 463 cmd.connect_cmd.recfmt = 1234; 464 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 465 rc = nvmf_ctrlr_cmd_connect(&req); 466 poll_threads(); 467 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 468 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 469 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT); 470 CU_ASSERT(qpair.ctrlr == NULL); 471 cmd.connect_cmd.recfmt = 0; 472 473 /* Subsystem not found */ 474 memset(&rsp, 0, sizeof(rsp)); 475 MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL); 476 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 477 rc = nvmf_ctrlr_cmd_connect(&req); 478 poll_threads(); 479 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 480 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 481 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 482 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 483 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256); 484 CU_ASSERT(qpair.ctrlr == NULL); 485 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 486 487 /* Unterminated hostnqn */ 488 memset(&rsp, 0, sizeof(rsp)); 489 memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn)); 490 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 491 rc = nvmf_ctrlr_cmd_connect(&req); 492 poll_threads(); 493 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 494 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 495 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 496 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 497 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512); 498 CU_ASSERT(qpair.ctrlr == NULL); 499 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 500 501 /* Host not allowed */ 502 memset(&rsp, 0, sizeof(rsp)); 503 MOCK_SET(spdk_nvmf_subsystem_host_allowed, false); 504 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 505 rc = nvmf_ctrlr_cmd_connect(&req); 506 poll_threads(); 507 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 508 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 509 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST); 510 CU_ASSERT(qpair.ctrlr == NULL); 511 MOCK_SET(spdk_nvmf_subsystem_host_allowed, true); 512 513 /* Invalid sqsize == 0 */ 514 memset(&rsp, 0, sizeof(rsp)); 515 cmd.connect_cmd.sqsize = 0; 516 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 517 rc = nvmf_ctrlr_cmd_connect(&req); 518 poll_threads(); 519 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 520 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 521 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 522 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 523 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 524 CU_ASSERT(qpair.ctrlr == NULL); 525 cmd.connect_cmd.sqsize = 31; 526 527 /* Invalid admin sqsize > max_aq_depth */ 528 memset(&rsp, 0, sizeof(rsp)); 529 cmd.connect_cmd.sqsize = 32; 530 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 531 rc = nvmf_ctrlr_cmd_connect(&req); 532 poll_threads(); 533 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 534 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 535 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 536 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 537 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 538 CU_ASSERT(qpair.ctrlr == NULL); 539 cmd.connect_cmd.sqsize = 31; 540 541 /* Invalid I/O sqsize > max_queue_depth */ 542 memset(&rsp, 0, sizeof(rsp)); 543 cmd.connect_cmd.qid = 1; 544 cmd.connect_cmd.sqsize = 64; 545 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 546 rc = nvmf_ctrlr_cmd_connect(&req); 547 poll_threads(); 548 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 549 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 550 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 551 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 552 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 553 CU_ASSERT(qpair.ctrlr == NULL); 554 cmd.connect_cmd.qid = 0; 555 cmd.connect_cmd.sqsize = 31; 556 557 /* Invalid cntlid for admin queue */ 558 memset(&rsp, 0, sizeof(rsp)); 559 connect_data.cntlid = 0x1234; 560 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 561 rc = nvmf_ctrlr_cmd_connect(&req); 562 poll_threads(); 563 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 564 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 565 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 566 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 567 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 568 CU_ASSERT(qpair.ctrlr == NULL); 569 connect_data.cntlid = 0xFFFF; 570 571 ctrlr.admin_qpair = &admin_qpair; 572 ctrlr.subsys = &subsystem; 573 574 /* Valid I/O queue connect command */ 575 memset(&rsp, 0, sizeof(rsp)); 576 MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr); 577 cmd.connect_cmd.qid = 1; 578 cmd.connect_cmd.sqsize = 63; 579 sgroups[subsystem.id].io_outstanding++; 580 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 581 rc = nvmf_ctrlr_cmd_connect(&req); 582 poll_threads(); 583 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 584 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 585 CU_ASSERT(qpair.ctrlr == &ctrlr); 586 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 587 qpair.ctrlr = NULL; 588 cmd.connect_cmd.sqsize = 31; 589 590 /* Non-existent controller */ 591 memset(&rsp, 0, sizeof(rsp)); 592 MOCK_SET(nvmf_subsystem_get_ctrlr, NULL); 593 sgroups[subsystem.id].io_outstanding++; 594 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 595 rc = nvmf_ctrlr_cmd_connect(&req); 596 poll_threads(); 597 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 598 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 599 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 600 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 601 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 602 CU_ASSERT(qpair.ctrlr == NULL); 603 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 604 MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr); 605 606 /* I/O connect to discovery controller */ 607 memset(&rsp, 0, sizeof(rsp)); 608 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 609 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 610 sgroups[subsystem.id].io_outstanding++; 611 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 612 rc = nvmf_ctrlr_cmd_connect(&req); 613 poll_threads(); 614 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 615 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 616 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 617 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 618 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 619 CU_ASSERT(qpair.ctrlr == NULL); 620 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 621 622 /* I/O connect to discovery controller with keep-alive-timeout != 0 */ 623 cmd.connect_cmd.qid = 0; 624 cmd.connect_cmd.kato = 120000; 625 memset(&rsp, 0, sizeof(rsp)); 626 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 627 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 628 sgroups[subsystem.id].io_outstanding++; 629 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 630 rc = nvmf_ctrlr_cmd_connect(&req); 631 poll_threads(); 632 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 633 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 634 CU_ASSERT(qpair.ctrlr != NULL); 635 CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL); 636 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 637 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 638 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 639 free(qpair.ctrlr); 640 qpair.ctrlr = NULL; 641 642 /* I/O connect to discovery controller with keep-alive-timeout == 0. 643 * Then, a fixed timeout value is set to keep-alive-timeout. 644 */ 645 cmd.connect_cmd.kato = 0; 646 memset(&rsp, 0, sizeof(rsp)); 647 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 648 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 649 sgroups[subsystem.id].io_outstanding++; 650 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 651 rc = nvmf_ctrlr_cmd_connect(&req); 652 poll_threads(); 653 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 654 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 655 CU_ASSERT(qpair.ctrlr != NULL); 656 CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL); 657 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 658 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 659 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 660 free(qpair.ctrlr); 661 qpair.ctrlr = NULL; 662 cmd.connect_cmd.qid = 1; 663 cmd.connect_cmd.kato = 120000; 664 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 665 666 /* I/O connect to disabled controller */ 667 memset(&rsp, 0, sizeof(rsp)); 668 ctrlr.vcprop.cc.bits.en = 0; 669 sgroups[subsystem.id].io_outstanding++; 670 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 671 rc = nvmf_ctrlr_cmd_connect(&req); 672 poll_threads(); 673 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 674 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 675 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 676 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 677 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 678 CU_ASSERT(qpair.ctrlr == NULL); 679 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 680 ctrlr.vcprop.cc.bits.en = 1; 681 682 /* I/O connect with invalid IOSQES */ 683 memset(&rsp, 0, sizeof(rsp)); 684 ctrlr.vcprop.cc.bits.iosqes = 3; 685 sgroups[subsystem.id].io_outstanding++; 686 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 687 rc = nvmf_ctrlr_cmd_connect(&req); 688 poll_threads(); 689 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 690 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 691 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 692 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 693 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 694 CU_ASSERT(qpair.ctrlr == NULL); 695 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 696 ctrlr.vcprop.cc.bits.iosqes = 6; 697 698 /* I/O connect with invalid IOCQES */ 699 memset(&rsp, 0, sizeof(rsp)); 700 ctrlr.vcprop.cc.bits.iocqes = 3; 701 sgroups[subsystem.id].io_outstanding++; 702 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 703 rc = nvmf_ctrlr_cmd_connect(&req); 704 poll_threads(); 705 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 706 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 707 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 708 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 709 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 710 CU_ASSERT(qpair.ctrlr == NULL); 711 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 712 ctrlr.vcprop.cc.bits.iocqes = 4; 713 714 /* I/O connect with too many existing qpairs */ 715 memset(&rsp, 0, sizeof(rsp)); 716 spdk_bit_array_set(ctrlr.qpair_mask, 0); 717 spdk_bit_array_set(ctrlr.qpair_mask, 1); 718 spdk_bit_array_set(ctrlr.qpair_mask, 2); 719 sgroups[subsystem.id].io_outstanding++; 720 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 721 rc = nvmf_ctrlr_cmd_connect(&req); 722 poll_threads(); 723 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 724 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 725 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 726 CU_ASSERT(qpair.ctrlr == NULL); 727 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 728 spdk_bit_array_clear(ctrlr.qpair_mask, 0); 729 spdk_bit_array_clear(ctrlr.qpair_mask, 1); 730 spdk_bit_array_clear(ctrlr.qpair_mask, 2); 731 732 /* I/O connect with duplicate queue ID */ 733 memset(&rsp, 0, sizeof(rsp)); 734 memset(&qpair2, 0, sizeof(qpair2)); 735 qpair2.group = &group; 736 qpair2.qid = 1; 737 spdk_bit_array_set(ctrlr.qpair_mask, 1); 738 cmd.connect_cmd.qid = 1; 739 sgroups[subsystem.id].io_outstanding++; 740 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 741 rc = nvmf_ctrlr_cmd_connect(&req); 742 poll_threads(); 743 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 744 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 745 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 746 CU_ASSERT(qpair.ctrlr == NULL); 747 CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0); 748 749 /* Clean up globals */ 750 MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem); 751 MOCK_CLEAR(spdk_nvmf_poll_group_create); 752 753 spdk_bit_array_free(&ctrlr.qpair_mask); 754 free(sgroups); 755 } 756 757 static void 758 test_get_ns_id_desc_list(void) 759 { 760 struct spdk_nvmf_subsystem subsystem; 761 struct spdk_nvmf_qpair qpair; 762 struct spdk_nvmf_ctrlr ctrlr; 763 struct spdk_nvmf_request req; 764 struct spdk_nvmf_ns *ns_ptrs[1]; 765 struct spdk_nvmf_ns ns; 766 union nvmf_h2c_msg cmd; 767 union nvmf_c2h_msg rsp; 768 struct spdk_bdev bdev; 769 uint8_t buf[4096]; 770 771 memset(&subsystem, 0, sizeof(subsystem)); 772 ns_ptrs[0] = &ns; 773 subsystem.ns = ns_ptrs; 774 subsystem.max_nsid = 1; 775 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 776 777 memset(&ns, 0, sizeof(ns)); 778 ns.opts.nsid = 1; 779 ns.bdev = &bdev; 780 781 memset(&qpair, 0, sizeof(qpair)); 782 qpair.ctrlr = &ctrlr; 783 784 memset(&ctrlr, 0, sizeof(ctrlr)); 785 ctrlr.subsys = &subsystem; 786 ctrlr.vcprop.cc.bits.en = 1; 787 788 memset(&req, 0, sizeof(req)); 789 req.qpair = &qpair; 790 req.cmd = &cmd; 791 req.rsp = &rsp; 792 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 793 req.data = buf; 794 req.length = sizeof(buf); 795 796 memset(&cmd, 0, sizeof(cmd)); 797 cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY; 798 cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST; 799 800 /* Invalid NSID */ 801 cmd.nvme_cmd.nsid = 0; 802 memset(&rsp, 0, sizeof(rsp)); 803 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 804 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 805 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 806 807 /* Valid NSID, but ns has no IDs defined */ 808 cmd.nvme_cmd.nsid = 1; 809 memset(&rsp, 0, sizeof(rsp)); 810 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 811 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 812 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 813 CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf))); 814 815 /* Valid NSID, only EUI64 defined */ 816 ns.opts.eui64[0] = 0x11; 817 ns.opts.eui64[7] = 0xFF; 818 memset(&rsp, 0, sizeof(rsp)); 819 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 820 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 821 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 822 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 823 CU_ASSERT(buf[1] == 8); 824 CU_ASSERT(buf[4] == 0x11); 825 CU_ASSERT(buf[11] == 0xFF); 826 CU_ASSERT(buf[13] == 0); 827 828 /* Valid NSID, only NGUID defined */ 829 memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64)); 830 ns.opts.nguid[0] = 0x22; 831 ns.opts.nguid[15] = 0xEE; 832 memset(&rsp, 0, sizeof(rsp)); 833 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 834 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 835 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 836 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID); 837 CU_ASSERT(buf[1] == 16); 838 CU_ASSERT(buf[4] == 0x22); 839 CU_ASSERT(buf[19] == 0xEE); 840 CU_ASSERT(buf[21] == 0); 841 842 /* Valid NSID, both EUI64 and NGUID defined */ 843 ns.opts.eui64[0] = 0x11; 844 ns.opts.eui64[7] = 0xFF; 845 ns.opts.nguid[0] = 0x22; 846 ns.opts.nguid[15] = 0xEE; 847 memset(&rsp, 0, sizeof(rsp)); 848 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 849 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 850 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 851 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 852 CU_ASSERT(buf[1] == 8); 853 CU_ASSERT(buf[4] == 0x11); 854 CU_ASSERT(buf[11] == 0xFF); 855 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 856 CU_ASSERT(buf[13] == 16); 857 CU_ASSERT(buf[16] == 0x22); 858 CU_ASSERT(buf[31] == 0xEE); 859 CU_ASSERT(buf[33] == 0); 860 861 /* Valid NSID, EUI64, NGUID, and UUID defined */ 862 ns.opts.eui64[0] = 0x11; 863 ns.opts.eui64[7] = 0xFF; 864 ns.opts.nguid[0] = 0x22; 865 ns.opts.nguid[15] = 0xEE; 866 ns.opts.uuid.u.raw[0] = 0x33; 867 ns.opts.uuid.u.raw[15] = 0xDD; 868 memset(&rsp, 0, sizeof(rsp)); 869 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 870 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 871 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 872 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 873 CU_ASSERT(buf[1] == 8); 874 CU_ASSERT(buf[4] == 0x11); 875 CU_ASSERT(buf[11] == 0xFF); 876 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 877 CU_ASSERT(buf[13] == 16); 878 CU_ASSERT(buf[16] == 0x22); 879 CU_ASSERT(buf[31] == 0xEE); 880 CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID); 881 CU_ASSERT(buf[33] == 16); 882 CU_ASSERT(buf[36] == 0x33); 883 CU_ASSERT(buf[51] == 0xDD); 884 CU_ASSERT(buf[53] == 0); 885 } 886 887 static void 888 test_identify_ns(void) 889 { 890 struct spdk_nvmf_subsystem subsystem = {}; 891 struct spdk_nvmf_transport transport = {}; 892 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 893 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 894 struct spdk_nvme_cmd cmd = {}; 895 struct spdk_nvme_cpl rsp = {}; 896 struct spdk_nvme_ns_data nsdata = {}; 897 struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}}; 898 struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}}; 899 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 900 901 subsystem.ns = ns_arr; 902 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 903 904 /* Invalid NSID 0 */ 905 cmd.nsid = 0; 906 memset(&nsdata, 0, sizeof(nsdata)); 907 memset(&rsp, 0, sizeof(rsp)); 908 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 909 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 910 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 911 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 912 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 913 914 /* Valid NSID 1 */ 915 cmd.nsid = 1; 916 memset(&nsdata, 0, sizeof(nsdata)); 917 memset(&rsp, 0, sizeof(rsp)); 918 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 919 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 920 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 921 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 922 CU_ASSERT(nsdata.nsze == 1234); 923 924 /* Valid but inactive NSID 2 */ 925 cmd.nsid = 2; 926 memset(&nsdata, 0, sizeof(nsdata)); 927 memset(&rsp, 0, sizeof(rsp)); 928 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 929 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 930 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 931 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 932 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 933 934 /* Valid NSID 3 */ 935 cmd.nsid = 3; 936 memset(&nsdata, 0, sizeof(nsdata)); 937 memset(&rsp, 0, sizeof(rsp)); 938 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 939 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 940 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 941 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 942 CU_ASSERT(nsdata.nsze == 5678); 943 944 /* Invalid NSID 4 */ 945 cmd.nsid = 4; 946 memset(&nsdata, 0, sizeof(nsdata)); 947 memset(&rsp, 0, sizeof(rsp)); 948 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 949 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 950 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 951 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 952 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 953 954 /* Invalid NSID 0xFFFFFFFF (NS management not supported) */ 955 cmd.nsid = 0xFFFFFFFF; 956 memset(&nsdata, 0, sizeof(nsdata)); 957 memset(&rsp, 0, sizeof(rsp)); 958 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 959 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 960 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 961 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 962 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 963 } 964 965 static void 966 test_set_get_features(void) 967 { 968 struct spdk_nvmf_subsystem subsystem = {}; 969 struct spdk_nvmf_qpair admin_qpair = {}; 970 struct spdk_nvmf_subsystem_listener listener = {}; 971 struct spdk_nvmf_ctrlr ctrlr = { 972 .subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener 973 }; 974 union nvmf_h2c_msg cmd = {}; 975 union nvmf_c2h_msg rsp = {}; 976 struct spdk_nvmf_ns ns[3]; 977 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};; 978 struct spdk_nvmf_request req; 979 int rc; 980 981 subsystem.ns = ns_arr; 982 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 983 listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 984 admin_qpair.ctrlr = &ctrlr; 985 req.qpair = &admin_qpair; 986 cmd.nvme_cmd.nsid = 1; 987 req.cmd = &cmd; 988 req.rsp = &rsp; 989 990 /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 991 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 992 cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1; 993 ns[0].ptpl_file = "testcfg"; 994 rc = nvmf_ctrlr_set_features_reservation_persistence(&req); 995 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 996 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 997 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE); 998 CU_ASSERT(ns[0].ptpl_activated == true); 999 1000 /* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 1001 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1002 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST; 1003 rc = nvmf_ctrlr_get_features_reservation_persistence(&req); 1004 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1005 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1006 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1007 CU_ASSERT(rsp.nvme_cpl.cdw0 == 1); 1008 1009 1010 /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */ 1011 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1012 cmd.nvme_cmd.cdw11 = 0x42; 1013 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1014 1015 rc = nvmf_ctrlr_get_features(&req); 1016 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1017 1018 /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */ 1019 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1020 cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */ 1021 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1022 1023 rc = nvmf_ctrlr_get_features(&req); 1024 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1025 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1026 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1027 1028 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */ 1029 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1030 cmd.nvme_cmd.cdw11 = 0x42; 1031 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1032 1033 rc = nvmf_ctrlr_set_features(&req); 1034 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1035 1036 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */ 1037 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1038 cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */ 1039 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1040 1041 rc = nvmf_ctrlr_set_features(&req); 1042 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1043 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1044 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1045 1046 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */ 1047 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1048 cmd.nvme_cmd.cdw11 = 0x42; 1049 cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */ 1050 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1051 1052 rc = nvmf_ctrlr_set_features(&req); 1053 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1054 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1055 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1056 1057 1058 /* get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */ 1059 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1060 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1061 1062 rc = nvmf_ctrlr_get_features(&req); 1063 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1064 1065 /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */ 1066 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1067 cmd.nvme_cmd.cdw11 = 0x42; 1068 cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1; 1069 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1070 1071 rc = nvmf_ctrlr_set_features(&req); 1072 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1073 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1074 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1075 1076 /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */ 1077 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1078 cmd.nvme_cmd.cdw11 = 0x42; 1079 cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0; 1080 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1081 1082 rc = nvmf_ctrlr_set_features(&req); 1083 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1084 } 1085 1086 /* 1087 * Reservation Unit Test Configuration 1088 * -------- -------- -------- 1089 * | Host A | | Host B | | Host C | 1090 * -------- -------- -------- 1091 * / \ | | 1092 * -------- -------- ------- ------- 1093 * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C| 1094 * -------- -------- ------- ------- 1095 * \ \ / / 1096 * \ \ / / 1097 * \ \ / / 1098 * -------------------------------------- 1099 * | NAMESPACE 1 | 1100 * -------------------------------------- 1101 */ 1102 1103 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C; 1104 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info; 1105 1106 static void 1107 ut_reservation_init(enum spdk_nvme_reservation_type rtype) 1108 { 1109 /* Host A has two controllers */ 1110 spdk_uuid_generate(&g_ctrlr1_A.hostid); 1111 spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid); 1112 1113 /* Host B has 1 controller */ 1114 spdk_uuid_generate(&g_ctrlr_B.hostid); 1115 1116 /* Host C has 1 controller */ 1117 spdk_uuid_generate(&g_ctrlr_C.hostid); 1118 1119 memset(&g_ns_info, 0, sizeof(g_ns_info)); 1120 g_ns_info.rtype = rtype; 1121 g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid; 1122 g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid; 1123 g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid; 1124 } 1125 1126 static void 1127 test_reservation_write_exclusive(void) 1128 { 1129 struct spdk_nvmf_request req = {}; 1130 union nvmf_h2c_msg cmd = {}; 1131 union nvmf_c2h_msg rsp = {}; 1132 int rc; 1133 1134 req.cmd = &cmd; 1135 req.rsp = &rsp; 1136 1137 /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */ 1138 ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE); 1139 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1140 1141 /* Test Case: Issue a Read command from Host A and Host B */ 1142 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1143 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1144 SPDK_CU_ASSERT_FATAL(rc == 0); 1145 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1146 SPDK_CU_ASSERT_FATAL(rc == 0); 1147 1148 /* Test Case: Issue a DSM Write command from Host A and Host B */ 1149 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1150 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1151 SPDK_CU_ASSERT_FATAL(rc == 0); 1152 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1153 SPDK_CU_ASSERT_FATAL(rc < 0); 1154 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1155 1156 /* Test Case: Issue a Write command from Host C */ 1157 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1158 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1159 SPDK_CU_ASSERT_FATAL(rc < 0); 1160 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1161 1162 /* Test Case: Issue a Read command from Host B */ 1163 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1164 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1165 SPDK_CU_ASSERT_FATAL(rc == 0); 1166 1167 /* Unregister Host C */ 1168 memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid)); 1169 1170 /* Test Case: Read and Write commands from non-registrant Host C */ 1171 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1172 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1173 SPDK_CU_ASSERT_FATAL(rc < 0); 1174 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1175 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1176 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1177 SPDK_CU_ASSERT_FATAL(rc == 0); 1178 } 1179 1180 static void 1181 test_reservation_exclusive_access(void) 1182 { 1183 struct spdk_nvmf_request req = {}; 1184 union nvmf_h2c_msg cmd = {}; 1185 union nvmf_c2h_msg rsp = {}; 1186 int rc; 1187 1188 req.cmd = &cmd; 1189 req.rsp = &rsp; 1190 1191 /* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */ 1192 ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS); 1193 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1194 1195 /* Test Case: Issue a Read command from Host B */ 1196 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1197 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1198 SPDK_CU_ASSERT_FATAL(rc < 0); 1199 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1200 1201 /* Test Case: Issue a Reservation Release command from a valid Registrant */ 1202 cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE; 1203 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1204 SPDK_CU_ASSERT_FATAL(rc == 0); 1205 } 1206 1207 static void 1208 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1209 { 1210 struct spdk_nvmf_request req = {}; 1211 union nvmf_h2c_msg cmd = {}; 1212 union nvmf_c2h_msg rsp = {}; 1213 int rc; 1214 1215 req.cmd = &cmd; 1216 req.rsp = &rsp; 1217 1218 /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */ 1219 ut_reservation_init(rtype); 1220 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1221 1222 /* Test Case: Issue a Read command from Host A and Host C */ 1223 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1224 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1225 SPDK_CU_ASSERT_FATAL(rc == 0); 1226 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1227 SPDK_CU_ASSERT_FATAL(rc == 0); 1228 1229 /* Test Case: Issue a DSM Write command from Host A and Host C */ 1230 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1231 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1232 SPDK_CU_ASSERT_FATAL(rc == 0); 1233 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1234 SPDK_CU_ASSERT_FATAL(rc == 0); 1235 1236 /* Unregister Host C */ 1237 memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid)); 1238 1239 /* Test Case: Read and Write commands from non-registrant Host C */ 1240 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1241 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1242 SPDK_CU_ASSERT_FATAL(rc == 0); 1243 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1244 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1245 SPDK_CU_ASSERT_FATAL(rc < 0); 1246 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1247 } 1248 1249 static void 1250 test_reservation_write_exclusive_regs_only_and_all_regs(void) 1251 { 1252 _test_reservation_write_exclusive_regs_only_and_all_regs( 1253 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1254 _test_reservation_write_exclusive_regs_only_and_all_regs( 1255 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS); 1256 } 1257 1258 static void 1259 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1260 { 1261 struct spdk_nvmf_request req = {}; 1262 union nvmf_h2c_msg cmd = {}; 1263 union nvmf_c2h_msg rsp = {}; 1264 int rc; 1265 1266 req.cmd = &cmd; 1267 req.rsp = &rsp; 1268 1269 /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */ 1270 ut_reservation_init(rtype); 1271 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1272 1273 /* Test Case: Issue a Write command from Host B */ 1274 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1275 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1276 SPDK_CU_ASSERT_FATAL(rc == 0); 1277 1278 /* Unregister Host B */ 1279 memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid)); 1280 1281 /* Test Case: Issue a Read command from Host B */ 1282 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1283 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1284 SPDK_CU_ASSERT_FATAL(rc < 0); 1285 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1286 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1287 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1288 SPDK_CU_ASSERT_FATAL(rc < 0); 1289 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1290 } 1291 1292 static void 1293 test_reservation_exclusive_access_regs_only_and_all_regs(void) 1294 { 1295 _test_reservation_exclusive_access_regs_only_and_all_regs( 1296 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY); 1297 _test_reservation_exclusive_access_regs_only_and_all_regs( 1298 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 1299 } 1300 1301 static void 1302 test_reservation_notification_log_page(void) 1303 { 1304 struct spdk_nvmf_ctrlr ctrlr; 1305 struct spdk_nvmf_qpair qpair; 1306 struct spdk_nvmf_ns ns; 1307 struct spdk_nvmf_request req; 1308 union nvmf_h2c_msg cmd = {}; 1309 union nvmf_c2h_msg rsp = {}; 1310 union spdk_nvme_async_event_completion event = {}; 1311 struct spdk_nvme_reservation_notification_log logs[3]; 1312 1313 memset(&ctrlr, 0, sizeof(ctrlr)); 1314 ctrlr.thread = spdk_get_thread(); 1315 TAILQ_INIT(&ctrlr.log_head); 1316 ns.nsid = 1; 1317 1318 /* Test Case: Mask all the reservation notifications */ 1319 ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK | 1320 SPDK_NVME_RESERVATION_RELEASED_MASK | 1321 SPDK_NVME_RESERVATION_PREEMPTED_MASK; 1322 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1323 SPDK_NVME_REGISTRATION_PREEMPTED); 1324 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1325 SPDK_NVME_RESERVATION_RELEASED); 1326 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1327 SPDK_NVME_RESERVATION_PREEMPTED); 1328 poll_threads(); 1329 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head)); 1330 1331 /* Test Case: Unmask all the reservation notifications, 1332 * 3 log pages are generated, and AER was triggered. 1333 */ 1334 ns.mask = 0; 1335 ctrlr.num_avail_log_pages = 0; 1336 req.cmd = &cmd; 1337 req.rsp = &rsp; 1338 ctrlr.aer_req[0] = &req; 1339 ctrlr.nr_aer_reqs = 1; 1340 req.qpair = &qpair; 1341 TAILQ_INIT(&qpair.outstanding); 1342 qpair.ctrlr = NULL; 1343 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1344 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 1345 1346 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1347 SPDK_NVME_REGISTRATION_PREEMPTED); 1348 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1349 SPDK_NVME_RESERVATION_RELEASED); 1350 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1351 SPDK_NVME_RESERVATION_PREEMPTED); 1352 poll_threads(); 1353 event.raw = rsp.nvme_cpl.cdw0; 1354 SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO); 1355 SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL); 1356 SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION); 1357 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3); 1358 1359 /* Test Case: Get Log Page to clear the log pages */ 1360 nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs)); 1361 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0); 1362 } 1363 1364 static void 1365 test_get_dif_ctx(void) 1366 { 1367 struct spdk_nvmf_subsystem subsystem = {}; 1368 struct spdk_nvmf_request req = {}; 1369 struct spdk_nvmf_qpair qpair = {}; 1370 struct spdk_nvmf_ctrlr ctrlr = {}; 1371 struct spdk_nvmf_ns ns = {}; 1372 struct spdk_nvmf_ns *_ns = NULL; 1373 struct spdk_bdev bdev = {}; 1374 union nvmf_h2c_msg cmd = {}; 1375 struct spdk_dif_ctx dif_ctx = {}; 1376 bool ret; 1377 1378 ctrlr.subsys = &subsystem; 1379 1380 qpair.ctrlr = &ctrlr; 1381 1382 req.qpair = &qpair; 1383 req.cmd = &cmd; 1384 1385 ns.bdev = &bdev; 1386 1387 ctrlr.dif_insert_or_strip = false; 1388 1389 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1390 CU_ASSERT(ret == false); 1391 1392 ctrlr.dif_insert_or_strip = true; 1393 qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED; 1394 1395 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1396 CU_ASSERT(ret == false); 1397 1398 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1399 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 1400 1401 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1402 CU_ASSERT(ret == false); 1403 1404 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH; 1405 1406 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1407 CU_ASSERT(ret == false); 1408 1409 qpair.qid = 1; 1410 1411 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1412 CU_ASSERT(ret == false); 1413 1414 cmd.nvme_cmd.nsid = 1; 1415 1416 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1417 CU_ASSERT(ret == false); 1418 1419 subsystem.max_nsid = 1; 1420 subsystem.ns = &_ns; 1421 subsystem.ns[0] = &ns; 1422 1423 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1424 CU_ASSERT(ret == false); 1425 1426 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE; 1427 1428 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1429 CU_ASSERT(ret == true); 1430 } 1431 1432 static void 1433 test_identify_ctrlr(void) 1434 { 1435 struct spdk_nvmf_subsystem subsystem = { 1436 .subtype = SPDK_NVMF_SUBTYPE_NVME 1437 }; 1438 struct spdk_nvmf_transport_ops tops = {}; 1439 struct spdk_nvmf_transport transport = { 1440 .ops = &tops, 1441 .opts = { 1442 .in_capsule_data_size = 4096, 1443 }, 1444 }; 1445 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 1446 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 1447 struct spdk_nvme_ctrlr_data cdata = {}; 1448 uint32_t expected_ioccsz; 1449 1450 nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata); 1451 1452 /* Check ioccsz, TCP transport */ 1453 tops.type = SPDK_NVME_TRANSPORT_TCP; 1454 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1455 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1456 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1457 1458 /* Check ioccsz, RDMA transport */ 1459 tops.type = SPDK_NVME_TRANSPORT_RDMA; 1460 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1461 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1462 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1463 1464 /* Check ioccsz, TCP transport with dif_insert_or_strip */ 1465 tops.type = SPDK_NVME_TRANSPORT_TCP; 1466 ctrlr.dif_insert_or_strip = true; 1467 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1468 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1469 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1470 } 1471 1472 static int 1473 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req) 1474 { 1475 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 1476 1477 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1478 }; 1479 1480 static void 1481 test_custom_admin_cmd(void) 1482 { 1483 struct spdk_nvmf_subsystem subsystem; 1484 struct spdk_nvmf_qpair qpair; 1485 struct spdk_nvmf_ctrlr ctrlr; 1486 struct spdk_nvmf_request req; 1487 struct spdk_nvmf_ns *ns_ptrs[1]; 1488 struct spdk_nvmf_ns ns; 1489 union nvmf_h2c_msg cmd; 1490 union nvmf_c2h_msg rsp; 1491 struct spdk_bdev bdev; 1492 uint8_t buf[4096]; 1493 int rc; 1494 1495 memset(&subsystem, 0, sizeof(subsystem)); 1496 ns_ptrs[0] = &ns; 1497 subsystem.ns = ns_ptrs; 1498 subsystem.max_nsid = 1; 1499 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1500 1501 memset(&ns, 0, sizeof(ns)); 1502 ns.opts.nsid = 1; 1503 ns.bdev = &bdev; 1504 1505 memset(&qpair, 0, sizeof(qpair)); 1506 qpair.ctrlr = &ctrlr; 1507 1508 memset(&ctrlr, 0, sizeof(ctrlr)); 1509 ctrlr.subsys = &subsystem; 1510 ctrlr.vcprop.cc.bits.en = 1; 1511 1512 memset(&req, 0, sizeof(req)); 1513 req.qpair = &qpair; 1514 req.cmd = &cmd; 1515 req.rsp = &rsp; 1516 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 1517 req.data = buf; 1518 req.length = sizeof(buf); 1519 1520 memset(&cmd, 0, sizeof(cmd)); 1521 cmd.nvme_cmd.opc = 0xc1; 1522 cmd.nvme_cmd.nsid = 0; 1523 memset(&rsp, 0, sizeof(rsp)); 1524 1525 spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr); 1526 1527 /* Ensure that our hdlr is being called */ 1528 rc = nvmf_ctrlr_process_admin_cmd(&req); 1529 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1530 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1531 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1532 } 1533 1534 static void 1535 test_fused_compare_and_write(void) 1536 { 1537 struct spdk_nvmf_request req = {}; 1538 struct spdk_nvmf_qpair qpair = {}; 1539 struct spdk_nvme_cmd cmd = {}; 1540 union nvmf_c2h_msg rsp = {}; 1541 struct spdk_nvmf_ctrlr ctrlr = {}; 1542 struct spdk_nvmf_subsystem subsystem = {}; 1543 struct spdk_nvmf_ns ns = {}; 1544 struct spdk_nvmf_ns *subsys_ns[1] = {}; 1545 struct spdk_nvmf_subsystem_listener listener = {}; 1546 struct spdk_bdev bdev = {}; 1547 1548 struct spdk_nvmf_poll_group group = {}; 1549 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1550 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 1551 1552 ns.bdev = &bdev; 1553 1554 subsystem.id = 0; 1555 subsystem.max_nsid = 1; 1556 subsys_ns[0] = &ns; 1557 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 1558 1559 listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1560 1561 /* Enable controller */ 1562 ctrlr.vcprop.cc.bits.en = 1; 1563 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 1564 ctrlr.listener = &listener; 1565 1566 group.num_sgroups = 1; 1567 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1568 sgroups.num_ns = 1; 1569 sgroups.ns_info = &ns_info; 1570 TAILQ_INIT(&sgroups.queued); 1571 group.sgroups = &sgroups; 1572 TAILQ_INIT(&qpair.outstanding); 1573 1574 qpair.ctrlr = &ctrlr; 1575 qpair.group = &group; 1576 qpair.qid = 1; 1577 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1578 1579 cmd.nsid = 1; 1580 1581 req.qpair = &qpair; 1582 req.cmd = (union nvmf_h2c_msg *)&cmd; 1583 req.rsp = &rsp; 1584 1585 /* SUCCESS/SUCCESS */ 1586 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1587 cmd.opc = SPDK_NVME_OPC_COMPARE; 1588 1589 spdk_nvmf_request_exec(&req); 1590 CU_ASSERT(qpair.first_fused_req != NULL); 1591 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1592 1593 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1594 cmd.opc = SPDK_NVME_OPC_WRITE; 1595 1596 spdk_nvmf_request_exec(&req); 1597 CU_ASSERT(qpair.first_fused_req == NULL); 1598 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1599 1600 /* Wrong sequence */ 1601 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1602 cmd.opc = SPDK_NVME_OPC_WRITE; 1603 1604 spdk_nvmf_request_exec(&req); 1605 CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status)); 1606 CU_ASSERT(qpair.first_fused_req == NULL); 1607 1608 /* Write as FUSE_FIRST (Wrong op code) */ 1609 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1610 cmd.opc = SPDK_NVME_OPC_WRITE; 1611 1612 spdk_nvmf_request_exec(&req); 1613 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 1614 CU_ASSERT(qpair.first_fused_req == NULL); 1615 1616 /* Compare as FUSE_SECOND (Wrong op code) */ 1617 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1618 cmd.opc = SPDK_NVME_OPC_COMPARE; 1619 1620 spdk_nvmf_request_exec(&req); 1621 CU_ASSERT(qpair.first_fused_req != NULL); 1622 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1623 1624 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1625 cmd.opc = SPDK_NVME_OPC_COMPARE; 1626 1627 spdk_nvmf_request_exec(&req); 1628 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 1629 CU_ASSERT(qpair.first_fused_req == NULL); 1630 } 1631 1632 static void 1633 test_multi_async_event_reqs(void) 1634 { 1635 struct spdk_nvmf_subsystem subsystem = {}; 1636 struct spdk_nvmf_qpair qpair = {}; 1637 struct spdk_nvmf_ctrlr ctrlr = {}; 1638 struct spdk_nvmf_request req[5] = {}; 1639 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 1640 struct spdk_nvmf_ns ns = {}; 1641 union nvmf_h2c_msg cmd[5] = {}; 1642 union nvmf_c2h_msg rsp[5] = {}; 1643 1644 struct spdk_nvmf_poll_group group = {}; 1645 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1646 1647 int i; 1648 1649 ns_ptrs[0] = &ns; 1650 subsystem.ns = ns_ptrs; 1651 subsystem.max_nsid = 1; 1652 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1653 1654 ns.opts.nsid = 1; 1655 group.sgroups = &sgroups; 1656 1657 qpair.ctrlr = &ctrlr; 1658 qpair.group = &group; 1659 TAILQ_INIT(&qpair.outstanding); 1660 1661 ctrlr.subsys = &subsystem; 1662 ctrlr.vcprop.cc.bits.en = 1; 1663 1664 for (i = 0; i < 5; i++) { 1665 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 1666 cmd[i].nvme_cmd.nsid = 1; 1667 cmd[i].nvme_cmd.cid = i; 1668 1669 req[i].qpair = &qpair; 1670 req[i].cmd = &cmd[i]; 1671 req[i].rsp = &rsp[i]; 1672 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 1673 } 1674 1675 /* Target can store NVMF_MAX_ASYNC_EVENTS reqs */ 1676 sgroups.io_outstanding = NVMF_MAX_ASYNC_EVENTS; 1677 for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) { 1678 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 1679 CU_ASSERT(ctrlr.nr_aer_reqs == i + 1); 1680 } 1681 CU_ASSERT(sgroups.io_outstanding == 0); 1682 1683 /* Exceeding the NVMF_MAX_ASYNC_EVENTS reports error */ 1684 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1685 CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS); 1686 CU_ASSERT(rsp[4].nvme_cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC); 1687 CU_ASSERT(rsp[4].nvme_cpl.status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED); 1688 1689 /* Test if the aer_reqs keep continuous when abort a req in the middle */ 1690 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true); 1691 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 1692 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 1693 CU_ASSERT(ctrlr.aer_req[2] == &req[3]); 1694 1695 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true); 1696 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 1697 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 1698 CU_ASSERT(ctrlr.aer_req[2] == NULL); 1699 CU_ASSERT(ctrlr.nr_aer_reqs == 2); 1700 1701 TAILQ_REMOVE(&qpair.outstanding, &req[0], link); 1702 TAILQ_REMOVE(&qpair.outstanding, &req[1], link); 1703 } 1704 1705 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t)) 1706 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE) 1707 static void 1708 test_get_ana_log_page(void) 1709 { 1710 struct spdk_nvmf_subsystem subsystem = {}; 1711 struct spdk_nvmf_ctrlr ctrlr = {}; 1712 struct spdk_nvmf_subsystem_listener listener = {}; 1713 struct spdk_nvmf_ns ns[3]; 1714 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]}; 1715 uint64_t offset; 1716 uint32_t length; 1717 int i; 1718 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 1719 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 1720 struct spdk_nvme_ana_page *ana_hdr; 1721 char _ana_desc[UT_ANA_DESC_SIZE]; 1722 struct spdk_nvme_ana_group_descriptor *ana_desc; 1723 1724 subsystem.ns = ns_arr; 1725 subsystem.max_nsid = 3; 1726 ctrlr.subsys = &subsystem; 1727 ctrlr.listener = &listener; 1728 listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1729 1730 for (i = 0; i < 3; i++) { 1731 ns_arr[i]->nsid = i + 1; 1732 } 1733 1734 /* create expected page */ 1735 ana_hdr = (void *)&expected_page[0]; 1736 ana_hdr->num_ana_group_desc = 3; 1737 ana_hdr->change_count = 0; 1738 1739 /* descriptor may be unaligned. So create data and then copy it to the location. */ 1740 ana_desc = (void *)_ana_desc; 1741 offset = sizeof(struct spdk_nvme_ana_page); 1742 1743 for (i = 0; i < 3; i++) { 1744 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 1745 ana_desc->ana_group_id = ns_arr[i]->nsid; 1746 ana_desc->num_of_nsid = 1; 1747 ana_desc->change_count = 0; 1748 ana_desc->ana_state = ctrlr.listener->ana_state; 1749 ana_desc->nsid[0] = ns_arr[i]->nsid; 1750 memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE); 1751 offset += UT_ANA_DESC_SIZE; 1752 } 1753 1754 /* read entire actual log page */ 1755 offset = 0; 1756 while (offset < UT_ANA_LOG_PAGE_SIZE) { 1757 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 1758 nvmf_get_ana_log_page(&ctrlr, &actual_page[offset], offset, length); 1759 offset += length; 1760 } 1761 1762 /* compare expected page and actual page */ 1763 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 1764 } 1765 1766 int main(int argc, char **argv) 1767 { 1768 CU_pSuite suite = NULL; 1769 unsigned int num_failures; 1770 1771 CU_set_error_action(CUEA_ABORT); 1772 CU_initialize_registry(); 1773 1774 suite = CU_add_suite("nvmf", NULL, NULL); 1775 CU_ADD_TEST(suite, test_get_log_page); 1776 CU_ADD_TEST(suite, test_process_fabrics_cmd); 1777 CU_ADD_TEST(suite, test_connect); 1778 CU_ADD_TEST(suite, test_get_ns_id_desc_list); 1779 CU_ADD_TEST(suite, test_identify_ns); 1780 CU_ADD_TEST(suite, test_reservation_write_exclusive); 1781 CU_ADD_TEST(suite, test_reservation_exclusive_access); 1782 CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs); 1783 CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs); 1784 CU_ADD_TEST(suite, test_reservation_notification_log_page); 1785 CU_ADD_TEST(suite, test_get_dif_ctx); 1786 CU_ADD_TEST(suite, test_set_get_features); 1787 CU_ADD_TEST(suite, test_identify_ctrlr); 1788 CU_ADD_TEST(suite, test_custom_admin_cmd); 1789 CU_ADD_TEST(suite, test_fused_compare_and_write); 1790 CU_ADD_TEST(suite, test_multi_async_event_reqs); 1791 CU_ADD_TEST(suite, test_get_ana_log_page); 1792 1793 allocate_threads(1); 1794 set_thread(0); 1795 1796 CU_basic_set_mode(CU_BRM_VERBOSE); 1797 CU_basic_run_tests(); 1798 num_failures = CU_get_number_of_failures(); 1799 CU_cleanup_registry(); 1800 1801 free_threads(); 1802 1803 return num_failures; 1804 } 1805