1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

/* Pull in the code under test (and the multi-thread UT harness) as .c files
 * so that static functions in nvmf/ctrlr.c are visible to the tests.
 */
#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

/* Minimal stand-in for the real (opaque) struct spdk_bdev.
 * Only blockcnt is read here (by nvmf_bdev_ctrlr_identify_ns() below).
 */
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

/* Defaults returned by the subsystem SN/MN getter stubs. */
const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

/* Stubs for external dependencies of nvmf/ctrlr.c.  Individual tests
 * override several of these return values at runtime via MOCK_SET().
 */
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

/* Non-NULL sentinel: callers only check the pointer, never dereference it. */
DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);

/* Stub disconnect: always reports success.  Note this stub never invokes
 * cb_fn, so tests must not rely on the disconnect callback firing.
 */
int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

/* Fake identify-namespace: fills nsdata from the mock bdev's blockcnt and
 * reports a single LBA format of 512 bytes (lbads = log2(512)).
 */
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

/* Return the next populated namespace after prev_ns, scanning the ns array
 * (indexed by nsid - 1) up to max_nsid; NULL when none remain.
 */
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

/* Exercise nvmf_ctrlr_get_log_page(): one valid request, then three invalid
 * variants (bad log ID, non-dword-aligned offset, missing data buffer).
 */
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;	/* same address as `data`; restored after the NULL-buffer case below */
	req.length = sizeof(data);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	/* numdl is a 0's-based dword count sized to fit the buffer exactly */
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	cmd.nvme_cmd.cdw12 = 2;	/* offset of 2 bytes: not a multiple of 4 */
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

/* A fabrics command on a qpair with no controller must fail with
 * COMMAND SEQUENCE ERROR (only CONNECT is legal before a ctrlr exists).
 */
static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/* True iff the completion status is GENERIC/SUCCESS. */
static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

/*
 * Exercise nvmf_ctrlr_cmd_connect() for admin and I/O queues: the happy
 * paths plus each validation failure (data length, recfmt, unknown
 * subsystem, unterminated hostnqn, disallowed host, sqsize bounds, cntlid,
 * discovery subtype, keep-alive handling, disabled controller,
 * IOSQES/IOCQES, qpair-mask exhaustion, and duplicate queue ID).
 *
 * Each sub-case restores any state it mutates (cmd fields, mocks) before
 * the next one runs, so ordering within this function matters.
 */
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	/* Pre-built ctrlr used for the I/O-queue connect cases further down;
	 * qpair_mask of 3 matches transport.opts.max_qpairs_per_ctrlr below.
	 */
	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;	/* "any controller" for an admin-queue connect */
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Poll-group entries indexable by subsystem.id */
	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();	/* connect completes asynchronously on the subsystem thread */
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	/* kato == 0 on an NVMe subsystem means no keep-alive poller */
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	/* ipo 256 = offset of subnqn within the connect data */
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	/* ipo 44 = offset of sqsize within the connect command */
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	/* ipo 16 = offset of cntlid within the connect data */
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	/* Wire up the pre-built ctrlr for the I/O-queue connect cases */
	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	/* Failure is still reported asynchronously once on the I/O path */
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	/* ipo 42 = offset of qid within the connect command */
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);

	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* I/O connect to discovery controller with keep-alive-timeout == 0.
	 * Then, a fixed timeout value is set to keep-alive-timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);	/* qid 1 already taken by qpair2 */
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

/* Verify the Identify NS-ID-descriptor-list admin command: invalid NSID,
 * then buffers produced for each combination of EUI64/NGUID/UUID set on
 * the namespace options.
 */
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined.
	 * Descriptor layout: buf[0] = NIDT, buf[1] = NIDL, 2 reserved bytes,
	 * then NIDL bytes of identifier; the list is zero-terminated.
	 */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct
spdk_nvme_cpl rsp = {}; 916 struct spdk_nvme_ns_data nsdata = {}; 917 struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}}; 918 struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}}; 919 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 920 921 subsystem.ns = ns_arr; 922 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 923 924 /* Invalid NSID 0 */ 925 cmd.nsid = 0; 926 memset(&nsdata, 0, sizeof(nsdata)); 927 memset(&rsp, 0, sizeof(rsp)); 928 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 929 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 930 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 931 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 932 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 933 934 /* Valid NSID 1 */ 935 cmd.nsid = 1; 936 memset(&nsdata, 0, sizeof(nsdata)); 937 memset(&rsp, 0, sizeof(rsp)); 938 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 939 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 940 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 941 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 942 CU_ASSERT(nsdata.nsze == 1234); 943 944 /* Valid but inactive NSID 2 */ 945 cmd.nsid = 2; 946 memset(&nsdata, 0, sizeof(nsdata)); 947 memset(&rsp, 0, sizeof(rsp)); 948 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 949 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 950 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 951 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 952 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 953 954 /* Valid NSID 3 */ 955 cmd.nsid = 3; 956 memset(&nsdata, 0, sizeof(nsdata)); 957 memset(&rsp, 0, sizeof(rsp)); 958 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 959 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 960 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 961 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 962 CU_ASSERT(nsdata.nsze == 
5678); 963 964 /* Invalid NSID 4 */ 965 cmd.nsid = 4; 966 memset(&nsdata, 0, sizeof(nsdata)); 967 memset(&rsp, 0, sizeof(rsp)); 968 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 969 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 970 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 971 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 972 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 973 974 /* Invalid NSID 0xFFFFFFFF (NS management not supported) */ 975 cmd.nsid = 0xFFFFFFFF; 976 memset(&nsdata, 0, sizeof(nsdata)); 977 memset(&rsp, 0, sizeof(rsp)); 978 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 979 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 980 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 981 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 982 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 983 } 984 985 static void 986 test_set_get_features(void) 987 { 988 struct spdk_nvmf_subsystem subsystem = {}; 989 struct spdk_nvmf_qpair admin_qpair = {}; 990 struct spdk_nvmf_subsystem_listener listener = {}; 991 struct spdk_nvmf_ctrlr ctrlr = { 992 .subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener 993 }; 994 union nvmf_h2c_msg cmd = {}; 995 union nvmf_c2h_msg rsp = {}; 996 struct spdk_nvmf_ns ns[3]; 997 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};; 998 struct spdk_nvmf_request req; 999 int rc; 1000 1001 subsystem.ns = ns_arr; 1002 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 1003 listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1004 admin_qpair.ctrlr = &ctrlr; 1005 req.qpair = &admin_qpair; 1006 cmd.nvme_cmd.nsid = 1; 1007 req.cmd = &cmd; 1008 req.rsp = &rsp; 1009 1010 /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 1011 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1012 cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1; 1013 ns[0].ptpl_file = "testcfg"; 1014 rc = 
nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);


	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);


	/* get SPDK_NVME_FEAT_ERROR_RECOVERY - generic
	 * NOTE(review): opc is left as SET_FEATURES here although
	 * nvmf_ctrlr_get_features() is called - looks unintentional; confirm
	 * whether the handler inspects opc at all.
	 */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set (rejected as invalid field below) */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *         --------             --------    --------
 *        | Host A |           | Host B |  | Host C |
 *         --------             --------    --------
 *        /        \               |           |
 *   --------   --------        -------     -------
 *  |Ctrlr1_A| |Ctrlr2_A|      |Ctrlr_B|   |Ctrlr_C|
 *   --------   --------        -------     -------
 *      \           \              /           /
 *       \           \            /           /
 *        \           \          /           /
 *        --------------------------------------
 *       |             NAMESPACE 1              |
 *        --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

/*
 * Reset the shared reservation fixture: hosts A, B, and C registered on the
 * namespace with the given reservation type; host A owns two controllers.
 */
static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B.
	 * Write-type commands from a non-holder must conflict.
	 */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C.
	 * Under Write Exclusive, reads remain allowed even for non-registrants.
	 */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B - even reads conflict
	 * for a non-holder under Exclusive Access.
	 */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C.
	 * Reads still pass; writes conflict once Host C is no longer registered.
	 */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

/* Run the write-exclusive registrants-only scenario for both rtype variants */
static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B - registrants may write */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B -
	 * both reads and writes conflict once unregistered.
	 */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

/* Run the exclusive-access registrants-only scenario for both rtype variants */
static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

static void
test_reservation_notification_log_page(void)
{
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ns ns;
	struct spdk_nvmf_request req;
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_reservation_notification_log logs[3];

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.thread = spdk_get_thread();
	TAILQ_INIT(&ctrlr.log_head);
	ns.nsid = 1;

	/* Test Case: Mask all the reservation notifications - nothing is logged */
	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
		  SPDK_NVME_RESERVATION_RELEASED_MASK |
		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_REGISTRATION_PREEMPTED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_RELEASED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));

	/* Test Case: Unmask all the reservation notifications,
	 * 3 log
pages are generated, and AER was triggered.
	 */
	ns.mask = 0;
	ctrlr.num_avail_log_pages = 0;
	req.cmd = &cmd;
	req.rsp = &rsp;
	/* One outstanding AER request so the notice can trigger an AER completion */
	ctrlr.aer_req[0] = &req;
	ctrlr.nr_aer_reqs = 1;
	req.qpair = &qpair;
	TAILQ_INIT(&qpair.outstanding);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);

	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_REGISTRATION_PREEMPTED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_RELEASED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	/* The AER completion (cdw0) must describe a reservation log-page event */
	event.raw = rsp.nvme_cpl.cdw0;
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);

	/* Test Case: Get Log Page to clear the log pages */
	nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
}

/*
 * Walk spdk_nvmf_request_get_dif_ctx() through each condition that returns
 * false, then one configuration that finally yields a DIF context.
 */
static void
test_get_dif_ctx(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *_ns = NULL;
	struct spdk_bdev bdev = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;

	ns.bdev = &bdev;

	/* DIF insert/strip disabled on the controller */
	ctrlr.dif_insert_or_strip = false;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* Qpair not yet active */
	ctrlr.dif_insert_or_strip = true;
	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* Fabrics opcode */
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* Non-fabrics opcode, but still the admin queue (qid == 0 from the {} init) */
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* I/O queue, but nsid is still 0 */
	qpair.qid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* nsid set, but the subsystem has no namespace installed yet */
	cmd.nvme_cmd.nsid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* Namespace installed; FLUSH still yields no DIF context */
	subsystem.max_nsid = 1;
	subsystem.ns = &_ns;
	subsystem.ns[0] = &ns;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	/* WRITE against the installed namespace produces a DIF context */
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == true);
}

/*
 * Verify the ioccsz reported by Identify Controller for TCP and RDMA
 * transports, with and without dif_insert_or_strip.
 */
static void
test_identify_ctrlr(void)
{
	struct spdk_nvmf_subsystem subsystem = {
		.subtype = SPDK_NVMF_SUBTYPE_NVME
	};
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_transport transport = {
		.ops = &tops,
		.opts = {
			.in_capsule_data_size = 4096,
		},
	};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_ctrlr_data cdata = {};
	uint32_t expected_ioccsz;

	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);

	/* Check ioccsz, TCP transport */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) ==
SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1476 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1477 1478 /* Check ioccsz, RDMA transport */ 1479 tops.type = SPDK_NVME_TRANSPORT_RDMA; 1480 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1481 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1482 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1483 1484 /* Check ioccsz, TCP transport with dif_insert_or_strip */ 1485 tops.type = SPDK_NVME_TRANSPORT_TCP; 1486 ctrlr.dif_insert_or_strip = true; 1487 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1488 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1489 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1490 } 1491 1492 static int 1493 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req) 1494 { 1495 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 1496 1497 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1498 }; 1499 1500 static void 1501 test_custom_admin_cmd(void) 1502 { 1503 struct spdk_nvmf_subsystem subsystem; 1504 struct spdk_nvmf_qpair qpair; 1505 struct spdk_nvmf_ctrlr ctrlr; 1506 struct spdk_nvmf_request req; 1507 struct spdk_nvmf_ns *ns_ptrs[1]; 1508 struct spdk_nvmf_ns ns; 1509 union nvmf_h2c_msg cmd; 1510 union nvmf_c2h_msg rsp; 1511 struct spdk_bdev bdev; 1512 uint8_t buf[4096]; 1513 int rc; 1514 1515 memset(&subsystem, 0, sizeof(subsystem)); 1516 ns_ptrs[0] = &ns; 1517 subsystem.ns = ns_ptrs; 1518 subsystem.max_nsid = 1; 1519 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1520 1521 memset(&ns, 0, sizeof(ns)); 1522 ns.opts.nsid = 1; 1523 ns.bdev = &bdev; 1524 1525 memset(&qpair, 0, sizeof(qpair)); 1526 qpair.ctrlr = &ctrlr; 1527 1528 memset(&ctrlr, 0, sizeof(ctrlr)); 1529 ctrlr.subsys = &subsystem; 1530 ctrlr.vcprop.cc.bits.en = 1; 1531 1532 memset(&req, 0, sizeof(req)); 1533 req.qpair 
= &qpair; 1534 req.cmd = &cmd; 1535 req.rsp = &rsp; 1536 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 1537 req.data = buf; 1538 req.length = sizeof(buf); 1539 1540 memset(&cmd, 0, sizeof(cmd)); 1541 cmd.nvme_cmd.opc = 0xc1; 1542 cmd.nvme_cmd.nsid = 0; 1543 memset(&rsp, 0, sizeof(rsp)); 1544 1545 spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr); 1546 1547 /* Ensure that our hdlr is being called */ 1548 rc = nvmf_ctrlr_process_admin_cmd(&req); 1549 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1550 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1551 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1552 } 1553 1554 static void 1555 test_fused_compare_and_write(void) 1556 { 1557 struct spdk_nvmf_request req = {}; 1558 struct spdk_nvmf_qpair qpair = {}; 1559 struct spdk_nvme_cmd cmd = {}; 1560 union nvmf_c2h_msg rsp = {}; 1561 struct spdk_nvmf_ctrlr ctrlr = {}; 1562 struct spdk_nvmf_subsystem subsystem = {}; 1563 struct spdk_nvmf_ns ns = {}; 1564 struct spdk_nvmf_ns *subsys_ns[1] = {}; 1565 struct spdk_nvmf_subsystem_listener listener = {}; 1566 struct spdk_bdev bdev = {}; 1567 1568 struct spdk_nvmf_poll_group group = {}; 1569 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1570 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 1571 1572 ns.bdev = &bdev; 1573 1574 subsystem.id = 0; 1575 subsystem.max_nsid = 1; 1576 subsys_ns[0] = &ns; 1577 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 1578 1579 listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1580 1581 /* Enable controller */ 1582 ctrlr.vcprop.cc.bits.en = 1; 1583 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 1584 ctrlr.listener = &listener; 1585 1586 group.num_sgroups = 1; 1587 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1588 sgroups.num_ns = 1; 1589 sgroups.ns_info = &ns_info; 1590 TAILQ_INIT(&sgroups.queued); 1591 group.sgroups = &sgroups; 1592 TAILQ_INIT(&qpair.outstanding); 1593 1594 qpair.ctrlr = &ctrlr; 1595 qpair.group = 
&group; 1596 qpair.qid = 1; 1597 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1598 1599 cmd.nsid = 1; 1600 1601 req.qpair = &qpair; 1602 req.cmd = (union nvmf_h2c_msg *)&cmd; 1603 req.rsp = &rsp; 1604 1605 /* SUCCESS/SUCCESS */ 1606 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1607 cmd.opc = SPDK_NVME_OPC_COMPARE; 1608 1609 spdk_nvmf_request_exec(&req); 1610 CU_ASSERT(qpair.first_fused_req != NULL); 1611 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1612 1613 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1614 cmd.opc = SPDK_NVME_OPC_WRITE; 1615 1616 spdk_nvmf_request_exec(&req); 1617 CU_ASSERT(qpair.first_fused_req == NULL); 1618 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1619 1620 /* Wrong sequence */ 1621 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1622 cmd.opc = SPDK_NVME_OPC_WRITE; 1623 1624 spdk_nvmf_request_exec(&req); 1625 CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status)); 1626 CU_ASSERT(qpair.first_fused_req == NULL); 1627 1628 /* Write as FUSE_FIRST (Wrong op code) */ 1629 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1630 cmd.opc = SPDK_NVME_OPC_WRITE; 1631 1632 spdk_nvmf_request_exec(&req); 1633 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 1634 CU_ASSERT(qpair.first_fused_req == NULL); 1635 1636 /* Compare as FUSE_SECOND (Wrong op code) */ 1637 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1638 cmd.opc = SPDK_NVME_OPC_COMPARE; 1639 1640 spdk_nvmf_request_exec(&req); 1641 CU_ASSERT(qpair.first_fused_req != NULL); 1642 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1643 1644 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1645 cmd.opc = SPDK_NVME_OPC_COMPARE; 1646 1647 spdk_nvmf_request_exec(&req); 1648 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 1649 CU_ASSERT(qpair.first_fused_req == NULL); 1650 } 1651 1652 static void 1653 test_multi_async_event_reqs(void) 1654 { 1655 struct spdk_nvmf_subsystem subsystem = {}; 1656 struct spdk_nvmf_qpair qpair = {}; 1657 struct spdk_nvmf_ctrlr ctrlr = {}; 1658 struct spdk_nvmf_request req[5] = {}; 1659 
struct spdk_nvmf_ns *ns_ptrs[1] = {}; 1660 struct spdk_nvmf_ns ns = {}; 1661 union nvmf_h2c_msg cmd[5] = {}; 1662 union nvmf_c2h_msg rsp[5] = {}; 1663 1664 struct spdk_nvmf_poll_group group = {}; 1665 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1666 1667 int i; 1668 1669 ns_ptrs[0] = &ns; 1670 subsystem.ns = ns_ptrs; 1671 subsystem.max_nsid = 1; 1672 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1673 1674 ns.opts.nsid = 1; 1675 group.sgroups = &sgroups; 1676 1677 qpair.ctrlr = &ctrlr; 1678 qpair.group = &group; 1679 TAILQ_INIT(&qpair.outstanding); 1680 1681 ctrlr.subsys = &subsystem; 1682 ctrlr.vcprop.cc.bits.en = 1; 1683 1684 for (i = 0; i < 5; i++) { 1685 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 1686 cmd[i].nvme_cmd.nsid = 1; 1687 cmd[i].nvme_cmd.cid = i; 1688 1689 req[i].qpair = &qpair; 1690 req[i].cmd = &cmd[i]; 1691 req[i].rsp = &rsp[i]; 1692 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 1693 } 1694 1695 /* Target can store NVMF_MAX_ASYNC_EVENTS reqs */ 1696 sgroups.io_outstanding = NVMF_MAX_ASYNC_EVENTS; 1697 for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) { 1698 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 1699 CU_ASSERT(ctrlr.nr_aer_reqs == i + 1); 1700 } 1701 CU_ASSERT(sgroups.io_outstanding == 0); 1702 1703 /* Exceeding the NVMF_MAX_ASYNC_EVENTS reports error */ 1704 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1705 CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS); 1706 CU_ASSERT(rsp[4].nvme_cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC); 1707 CU_ASSERT(rsp[4].nvme_cpl.status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED); 1708 1709 /* Test if the aer_reqs keep continuous when abort a req in the middle */ 1710 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true); 1711 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 1712 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 1713 CU_ASSERT(ctrlr.aer_req[2] == &req[3]); 1714 1715 
CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true); 1716 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 1717 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 1718 CU_ASSERT(ctrlr.aer_req[2] == NULL); 1719 CU_ASSERT(ctrlr.nr_aer_reqs == 2); 1720 1721 TAILQ_REMOVE(&qpair.outstanding, &req[0], link); 1722 TAILQ_REMOVE(&qpair.outstanding, &req[1], link); 1723 } 1724 1725 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t)) 1726 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE) 1727 static void 1728 test_get_ana_log_page(void) 1729 { 1730 struct spdk_nvmf_subsystem subsystem = {}; 1731 struct spdk_nvmf_ctrlr ctrlr = {}; 1732 struct spdk_nvmf_subsystem_listener listener = {}; 1733 struct spdk_nvmf_ns ns[3]; 1734 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]}; 1735 uint64_t offset; 1736 uint32_t length; 1737 int i; 1738 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 1739 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 1740 struct spdk_nvme_ana_page *ana_hdr; 1741 char _ana_desc[UT_ANA_DESC_SIZE]; 1742 struct spdk_nvme_ana_group_descriptor *ana_desc; 1743 1744 subsystem.ns = ns_arr; 1745 subsystem.max_nsid = 3; 1746 ctrlr.subsys = &subsystem; 1747 ctrlr.listener = &listener; 1748 listener.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1749 1750 for (i = 0; i < 3; i++) { 1751 ns_arr[i]->nsid = i + 1; 1752 } 1753 1754 /* create expected page */ 1755 ana_hdr = (void *)&expected_page[0]; 1756 ana_hdr->num_ana_group_desc = 3; 1757 ana_hdr->change_count = 0; 1758 1759 /* descriptor may be unaligned. So create data and then copy it to the location. 
*/ 1760 ana_desc = (void *)_ana_desc; 1761 offset = sizeof(struct spdk_nvme_ana_page); 1762 1763 for (i = 0; i < 3; i++) { 1764 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 1765 ana_desc->ana_group_id = ns_arr[i]->nsid; 1766 ana_desc->num_of_nsid = 1; 1767 ana_desc->change_count = 0; 1768 ana_desc->ana_state = ctrlr.listener->ana_state; 1769 ana_desc->nsid[0] = ns_arr[i]->nsid; 1770 memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE); 1771 offset += UT_ANA_DESC_SIZE; 1772 } 1773 1774 /* read entire actual log page */ 1775 offset = 0; 1776 while (offset < UT_ANA_LOG_PAGE_SIZE) { 1777 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 1778 nvmf_get_ana_log_page(&ctrlr, &actual_page[offset], offset, length); 1779 offset += length; 1780 } 1781 1782 /* compare expected page and actual page */ 1783 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 1784 } 1785 1786 int main(int argc, char **argv) 1787 { 1788 CU_pSuite suite = NULL; 1789 unsigned int num_failures; 1790 1791 CU_set_error_action(CUEA_ABORT); 1792 CU_initialize_registry(); 1793 1794 suite = CU_add_suite("nvmf", NULL, NULL); 1795 CU_ADD_TEST(suite, test_get_log_page); 1796 CU_ADD_TEST(suite, test_process_fabrics_cmd); 1797 CU_ADD_TEST(suite, test_connect); 1798 CU_ADD_TEST(suite, test_get_ns_id_desc_list); 1799 CU_ADD_TEST(suite, test_identify_ns); 1800 CU_ADD_TEST(suite, test_reservation_write_exclusive); 1801 CU_ADD_TEST(suite, test_reservation_exclusive_access); 1802 CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs); 1803 CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs); 1804 CU_ADD_TEST(suite, test_reservation_notification_log_page); 1805 CU_ADD_TEST(suite, test_get_dif_ctx); 1806 CU_ADD_TEST(suite, test_set_get_features); 1807 CU_ADD_TEST(suite, test_identify_ctrlr); 1808 CU_ADD_TEST(suite, test_custom_admin_cmd); 1809 CU_ADD_TEST(suite, test_fused_compare_and_write); 1810 CU_ADD_TEST(suite, test_multi_async_event_reqs); 
1811 CU_ADD_TEST(suite, test_get_ana_log_page); 1812 1813 allocate_threads(1); 1814 set_thread(0); 1815 1816 CU_basic_set_mode(CU_BRM_VERBOSE); 1817 CU_basic_run_tests(); 1818 num_failures = CU_get_number_of_failures(); 1819 CU_cleanup_registry(); 1820 1821 free_threads(); 1822 1823 return num_failures; 1824 } 1825