1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include "spdk/stdinc.h" 36 37 #include "spdk_cunit.h" 38 #include "spdk_internal/mock.h" 39 #include "thread/thread_internal.h" 40 41 #include "common/lib/ut_multithread.c" 42 #include "nvmf/ctrlr.c" 43 44 SPDK_LOG_REGISTER_COMPONENT(nvmf) 45 46 struct spdk_bdev { 47 int ut_mock; 48 uint64_t blockcnt; 49 uint32_t blocklen; 50 }; 51 52 const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn"; 53 const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn"; 54 55 static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL; 56 static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *) 57 0x8877665544332211UL; 58 static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL; 59 60 DEFINE_STUB(spdk_nvmf_tgt_find_subsystem, 61 struct spdk_nvmf_subsystem *, 62 (struct spdk_nvmf_tgt *tgt, const char *subnqn), 63 NULL); 64 65 DEFINE_STUB(spdk_nvmf_poll_group_create, 66 struct spdk_nvmf_poll_group *, 67 (struct spdk_nvmf_tgt *tgt), 68 NULL); 69 70 DEFINE_STUB(spdk_nvmf_subsystem_get_sn, 71 const char *, 72 (const struct spdk_nvmf_subsystem *subsystem), 73 subsystem_default_sn); 74 75 DEFINE_STUB(spdk_nvmf_subsystem_get_mn, 76 const char *, 77 (const struct spdk_nvmf_subsystem *subsystem), 78 subsystem_default_mn); 79 80 DEFINE_STUB(spdk_nvmf_subsystem_host_allowed, 81 bool, 82 (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn), 83 true); 84 85 DEFINE_STUB(nvmf_subsystem_add_ctrlr, 86 int, 87 (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr), 88 0); 89 90 DEFINE_STUB(nvmf_subsystem_get_ctrlr, 91 struct spdk_nvmf_ctrlr *, 92 (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), 93 NULL); 94 95 DEFINE_STUB(nvmf_ctrlr_dsm_supported, 96 bool, 97 (struct spdk_nvmf_ctrlr *ctrlr), 98 false); 99 100 DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported, 101 bool, 102 (struct spdk_nvmf_ctrlr *ctrlr), 103 false); 104 105 DEFINE_STUB_V(nvmf_get_discovery_log_page, 106 (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov, 107 uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid)); 108 109 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, 110 int, 111 (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 112 0); 113 114 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed, 115 bool, 116 (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid), 117 true); 118 119 DEFINE_STUB(nvmf_subsystem_find_listener, 120 struct spdk_nvmf_subsystem_listener *, 121 (struct spdk_nvmf_subsystem *subsystem, 122 const struct spdk_nvme_transport_id *trid), 123 (void *)0x1); 124 125 DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd, 126 int, 127 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 128 struct spdk_nvmf_request *req), 129 0); 130 131 DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd, 132 int, 133 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 134 struct spdk_nvmf_request *req), 135 0); 136 137 DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd, 138 int, 139 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 140 struct spdk_nvmf_request *req), 141 0); 142 143 DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd, 144 int, 145 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 146 struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req), 147 0); 148 149 DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd, 150 int, 151 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 152 struct spdk_nvmf_request *req), 153 0); 154 155 DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd, 156 int, 157 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 158 struct spdk_nvmf_request *req), 159 0); 160 161 DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd, 162 int, 163 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 164 struct spdk_nvmf_request *req), 165 0); 166 167 DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io, 168 int, 169 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 170 struct spdk_nvmf_request *req), 171 0); 172 173 DEFINE_STUB(nvmf_transport_req_complete, 174 int, 175 (struct spdk_nvmf_request *req), 176 0); 177 178 DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx)); 179 180 DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool, 181 (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, 182 struct spdk_dif_ctx *dif_ctx), 183 true); 184 185 DEFINE_STUB_V(nvmf_transport_qpair_abort_request, 186 (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req)); 187 188 DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd)); 189 DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl)); 190 191 DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem, 192 struct spdk_nvmf_ctrlr *ctrlr)); 193 194 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd, 195 int, 196 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 197 struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort), 198 0); 199 200 DEFINE_STUB(nvmf_transport_req_free, 201 int, 202 (struct spdk_nvmf_request *req), 203 0); 204 205 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin, 206 int, 207 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 208 struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn), 209 0); 210 DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 211 spdk_bdev_io_completion_cb cb, void *cb_arg), 0); 212 DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io)); 213 214 int 215 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx) 216 { 217 return 0; 218 } 219 220 void 221 nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata, 222 bool dif_insert_or_strip) 223 { 224 uint64_t num_blocks; 225 226 SPDK_CU_ASSERT_FATAL(ns->bdev != NULL); 227 num_blocks = ns->bdev->blockcnt; 228 nsdata->nsze = num_blocks; 229 nsdata->ncap = num_blocks; 230 nsdata->nuse = num_blocks; 231 nsdata->nlbaf = 0; 232 nsdata->flbas.format = 0; 233 nsdata->lbaf[0].lbads = spdk_u32log2(512); 234 } 235 236 struct spdk_nvmf_ns * 237 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem) 238 { 239 SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL); 240 return subsystem->ns[0]; 241 } 242 243 struct spdk_nvmf_ns * 244 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, 245 struct spdk_nvmf_ns *prev_ns) 246 { 247 uint32_t nsid; 248 249 SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL); 250 nsid = prev_ns->nsid; 251 252 if (nsid >= subsystem->max_nsid) { 253 return NULL; 254 } 255 for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) { 256 if (subsystem->ns[nsid - 1]) { 257 return subsystem->ns[nsid - 1]; 258 } 259 } 260 return NULL; 261 } 262 263 bool 264 nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev) 265 { 266 return true; 267 } 268 269 int 270 nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev, 271 struct spdk_bdev_desc *desc, 272 struct spdk_io_channel *ch, 273 struct spdk_nvmf_request *req) 274 { 275 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 276 uint64_t start_lba; 277 uint64_t num_blocks; 278 279 start_lba = from_le64(&req->cmd->nvme_cmd.cdw10); 280 num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1; 281 282 if ((start_lba + num_blocks) > bdev->blockcnt) { 283 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 284 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 285 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 286 } 287 288 if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) { 289 req->zcopy_bdev_io = zcopy_start_bdev_io_write; 290 } else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) { 291 req->zcopy_bdev_io = zcopy_start_bdev_io_read; 292 } else { 293 req->zcopy_bdev_io = zcopy_start_bdev_io_fail; 294 } 295 296 297 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 298 } 299 300 void 301 nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit) 302 { 303 req->zcopy_bdev_io = NULL; 304 spdk_nvmf_request_complete(req); 305 } 306 307 static void 308 test_get_log_page(void) 309 { 310 struct spdk_nvmf_subsystem subsystem = {}; 311 struct spdk_nvmf_request req = {}; 312 struct spdk_nvmf_qpair qpair = {}; 313 struct spdk_nvmf_ctrlr ctrlr = {}; 314 union nvmf_h2c_msg cmd = {}; 315 union nvmf_c2h_msg rsp = {}; 316 char data[4096]; 317 318 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 319 320 ctrlr.subsys = &subsystem; 321 322 qpair.ctrlr = &ctrlr; 323 324 req.qpair = &qpair; 325 req.cmd = &cmd; 326 req.rsp = &rsp; 327 req.data = &data; 328 req.length = sizeof(data); 329 330 /* Get Log Page - all valid */ 331 memset(&cmd, 0, sizeof(cmd)); 332 memset(&rsp, 0, sizeof(rsp)); 333 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 334 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 335 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 336 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 337 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 338 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 339 340 /* Get Log Page with invalid log ID */ 341 memset(&cmd, 0, sizeof(cmd)); 342 memset(&rsp, 0, sizeof(rsp)); 343 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 344 cmd.nvme_cmd.cdw10 = 0; 345 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 346 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 347 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 348 349 /* Get Log Page with invalid offset (not dword aligned) */ 350 memset(&cmd, 0, sizeof(cmd)); 351 memset(&rsp, 0, sizeof(rsp)); 352 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 353 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 354 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 355 cmd.nvme_cmd.cdw12 = 2; 356 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 357 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 358 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 359 360 /* Get Log Page without data buffer */ 361 memset(&cmd, 0, sizeof(cmd)); 362 memset(&rsp, 0, sizeof(rsp)); 363 req.data = NULL; 364 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 365 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 366 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 367 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 368 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 369 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 370 req.data = data; 371 } 372 373 static void 374 test_process_fabrics_cmd(void) 375 { 376 struct spdk_nvmf_request req = {}; 377 int ret; 378 struct spdk_nvmf_qpair req_qpair = {}; 379 union nvmf_h2c_msg req_cmd = {}; 380 union nvmf_c2h_msg req_rsp = {}; 381 382 req.qpair = &req_qpair; 383 req.cmd = &req_cmd; 384 req.rsp = &req_rsp; 385 req.qpair->ctrlr = NULL; 386 387 /* No ctrlr and invalid command check */ 388 req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET; 389 ret = nvmf_ctrlr_process_fabrics_cmd(&req); 390 CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 391 CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 392 } 393 394 static bool 395 nvme_status_success(const struct spdk_nvme_status *status) 396 { 397 return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS; 398 } 399 400 static void 401 test_connect(void) 402 { 403 struct spdk_nvmf_fabric_connect_data connect_data; 404 struct spdk_nvmf_poll_group group; 405 struct spdk_nvmf_subsystem_poll_group *sgroups; 406 struct spdk_nvmf_transport transport; 407 struct spdk_nvmf_transport_ops tops = {}; 408 struct spdk_nvmf_subsystem subsystem; 409 struct spdk_nvmf_request req; 410 struct spdk_nvmf_qpair admin_qpair; 411 struct spdk_nvmf_qpair qpair; 412 struct spdk_nvmf_qpair qpair2; 413 struct spdk_nvmf_ctrlr ctrlr; 414 struct spdk_nvmf_tgt tgt; 415 union nvmf_h2c_msg cmd; 416 union nvmf_c2h_msg rsp; 417 const uint8_t hostid[16] = { 418 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 419 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 420 }; 421 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 422 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 423 int rc; 424 425 memset(&group, 0, sizeof(group)); 426 group.thread = spdk_get_thread(); 427 428 memset(&ctrlr, 0, sizeof(ctrlr)); 429 ctrlr.subsys = &subsystem; 430 ctrlr.qpair_mask = spdk_bit_array_create(3); 431 SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL); 432 ctrlr.vcprop.cc.bits.en = 1; 433 ctrlr.vcprop.cc.bits.iosqes = 6; 434 ctrlr.vcprop.cc.bits.iocqes = 4; 435 436 memset(&admin_qpair, 0, sizeof(admin_qpair)); 437 admin_qpair.group = &group; 438 admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 439 440 memset(&tgt, 0, sizeof(tgt)); 441 memset(&transport, 0, sizeof(transport)); 442 transport.ops = &tops; 443 transport.opts.max_aq_depth = 32; 444 transport.opts.max_queue_depth = 64; 445 transport.opts.max_qpairs_per_ctrlr = 3; 446 transport.tgt = &tgt; 447 448 memset(&qpair, 0, sizeof(qpair)); 449 qpair.transport = &transport; 450 qpair.group = &group; 451 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 452 TAILQ_INIT(&qpair.outstanding); 453 454 memset(&connect_data, 0, sizeof(connect_data)); 455 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 456 connect_data.cntlid = 0xFFFF; 457 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 458 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 459 460 memset(&subsystem, 0, sizeof(subsystem)); 461 subsystem.thread = spdk_get_thread(); 462 subsystem.id = 1; 463 TAILQ_INIT(&subsystem.ctrlrs); 464 subsystem.tgt = &tgt; 465 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 466 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 467 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 468 469 sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group)); 470 group.sgroups = sgroups; 471 472 memset(&cmd, 0, sizeof(cmd)); 473 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 474 cmd.connect_cmd.cid = 1; 475 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 476 cmd.connect_cmd.recfmt = 0; 477 cmd.connect_cmd.qid = 0; 478 cmd.connect_cmd.sqsize = 31; 479 cmd.connect_cmd.cattr = 0; 480 cmd.connect_cmd.kato = 120000; 481 482 memset(&req, 0, sizeof(req)); 483 req.qpair = &qpair; 484 req.length = sizeof(connect_data); 485 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 486 req.data = &connect_data; 487 req.cmd = &cmd; 488 req.rsp = &rsp; 489 490 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 491 MOCK_SET(spdk_nvmf_poll_group_create, &group); 492 493 /* Valid admin connect command */ 494 memset(&rsp, 0, sizeof(rsp)); 495 sgroups[subsystem.id].mgmt_io_outstanding++; 496 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 497 rc = nvmf_ctrlr_cmd_connect(&req); 498 poll_threads(); 499 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 500 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 501 CU_ASSERT(qpair.ctrlr != NULL); 502 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 503 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 504 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 505 free(qpair.ctrlr); 506 qpair.ctrlr = NULL; 507 508 /* Valid admin connect command with kato = 0 */ 509 cmd.connect_cmd.kato = 0; 510 memset(&rsp, 0, sizeof(rsp)); 511 sgroups[subsystem.id].mgmt_io_outstanding++; 512 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 513 rc = nvmf_ctrlr_cmd_connect(&req); 514 poll_threads(); 515 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 516 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 517 CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL); 518 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 519 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 520 free(qpair.ctrlr); 521 qpair.ctrlr = NULL; 522 cmd.connect_cmd.kato = 120000; 523 524 /* Invalid data length */ 525 memset(&rsp, 0, sizeof(rsp)); 526 req.length = sizeof(connect_data) - 1; 527 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 528 rc = nvmf_ctrlr_cmd_connect(&req); 529 poll_threads(); 530 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 531 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 532 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 533 CU_ASSERT(qpair.ctrlr == NULL); 534 req.length = sizeof(connect_data); 535 536 /* Invalid recfmt */ 537 memset(&rsp, 0, sizeof(rsp)); 538 cmd.connect_cmd.recfmt = 1234; 539 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 540 rc = nvmf_ctrlr_cmd_connect(&req); 541 poll_threads(); 542 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 543 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 544 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT); 545 CU_ASSERT(qpair.ctrlr == NULL); 546 cmd.connect_cmd.recfmt = 0; 547 548 /* Subsystem not found */ 549 memset(&rsp, 0, sizeof(rsp)); 550 MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL); 551 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 552 rc = nvmf_ctrlr_cmd_connect(&req); 553 poll_threads(); 554 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 555 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 556 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 557 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 558 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256); 559 CU_ASSERT(qpair.ctrlr == NULL); 560 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 561 562 /* Unterminated hostnqn */ 563 memset(&rsp, 0, sizeof(rsp)); 564 memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn)); 565 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 566 rc = nvmf_ctrlr_cmd_connect(&req); 567 poll_threads(); 568 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 569 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 570 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 571 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 572 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512); 573 CU_ASSERT(qpair.ctrlr == NULL); 574 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 575 576 /* Host not allowed */ 577 memset(&rsp, 0, sizeof(rsp)); 578 MOCK_SET(spdk_nvmf_subsystem_host_allowed, false); 579 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 580 rc = nvmf_ctrlr_cmd_connect(&req); 581 poll_threads(); 582 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 583 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 584 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST); 585 CU_ASSERT(qpair.ctrlr == NULL); 586 MOCK_SET(spdk_nvmf_subsystem_host_allowed, true); 587 588 /* Invalid sqsize == 0 */ 589 memset(&rsp, 0, sizeof(rsp)); 590 cmd.connect_cmd.sqsize = 0; 591 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 592 rc = nvmf_ctrlr_cmd_connect(&req); 593 poll_threads(); 594 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 595 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 596 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 597 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 598 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 599 CU_ASSERT(qpair.ctrlr == NULL); 600 cmd.connect_cmd.sqsize = 31; 601 602 /* Invalid admin sqsize > max_aq_depth */ 603 memset(&rsp, 0, sizeof(rsp)); 604 cmd.connect_cmd.sqsize = 32; 605 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 606 rc = nvmf_ctrlr_cmd_connect(&req); 607 poll_threads(); 608 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 609 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 610 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 611 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 612 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 613 CU_ASSERT(qpair.ctrlr == NULL); 614 cmd.connect_cmd.sqsize = 31; 615 616 /* Invalid I/O sqsize > max_queue_depth */ 617 memset(&rsp, 0, sizeof(rsp)); 618 cmd.connect_cmd.qid = 1; 619 cmd.connect_cmd.sqsize = 64; 620 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 621 rc = nvmf_ctrlr_cmd_connect(&req); 622 poll_threads(); 623 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 624 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 625 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 626 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 627 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 628 CU_ASSERT(qpair.ctrlr == NULL); 629 cmd.connect_cmd.qid = 0; 630 cmd.connect_cmd.sqsize = 31; 631 632 /* Invalid cntlid for admin queue */ 633 memset(&rsp, 0, sizeof(rsp)); 634 connect_data.cntlid = 0x1234; 635 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 636 rc = nvmf_ctrlr_cmd_connect(&req); 637 poll_threads(); 638 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 639 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 640 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 641 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 642 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 643 CU_ASSERT(qpair.ctrlr == NULL); 644 connect_data.cntlid = 0xFFFF; 645 646 ctrlr.admin_qpair = &admin_qpair; 647 ctrlr.subsys = &subsystem; 648 649 /* Valid I/O queue connect command */ 650 memset(&rsp, 0, sizeof(rsp)); 651 MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr); 652 cmd.connect_cmd.qid = 1; 653 cmd.connect_cmd.sqsize = 63; 654 sgroups[subsystem.id].mgmt_io_outstanding++; 655 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 656 rc = nvmf_ctrlr_cmd_connect(&req); 657 poll_threads(); 658 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 659 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 660 CU_ASSERT(qpair.ctrlr == &ctrlr); 661 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 662 qpair.ctrlr = NULL; 663 cmd.connect_cmd.sqsize = 31; 664 665 /* Non-existent controller */ 666 memset(&rsp, 0, sizeof(rsp)); 667 MOCK_SET(nvmf_subsystem_get_ctrlr, NULL); 668 sgroups[subsystem.id].mgmt_io_outstanding++; 669 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 670 rc = nvmf_ctrlr_cmd_connect(&req); 671 poll_threads(); 672 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 673 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 674 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 675 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 676 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 677 CU_ASSERT(qpair.ctrlr == NULL); 678 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 679 MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr); 680 681 /* I/O connect to discovery controller */ 682 memset(&rsp, 0, sizeof(rsp)); 683 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 684 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 685 sgroups[subsystem.id].mgmt_io_outstanding++; 686 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 687 rc = nvmf_ctrlr_cmd_connect(&req); 688 poll_threads(); 689 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 690 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 691 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 692 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 693 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 694 CU_ASSERT(qpair.ctrlr == NULL); 695 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 696 697 /* I/O connect to discovery controller with keep-alive-timeout != 0 */ 698 cmd.connect_cmd.qid = 0; 699 cmd.connect_cmd.kato = 120000; 700 memset(&rsp, 0, sizeof(rsp)); 701 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 702 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 703 sgroups[subsystem.id].mgmt_io_outstanding++; 704 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 705 rc = nvmf_ctrlr_cmd_connect(&req); 706 poll_threads(); 707 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 708 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 709 CU_ASSERT(qpair.ctrlr != NULL); 710 CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL); 711 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 712 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 713 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 714 free(qpair.ctrlr); 715 qpair.ctrlr = NULL; 716 717 /* I/O connect to discovery controller with keep-alive-timeout == 0. 718 * Then, a fixed timeout value is set to keep-alive-timeout. 719 */ 720 cmd.connect_cmd.kato = 0; 721 memset(&rsp, 0, sizeof(rsp)); 722 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY; 723 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 724 sgroups[subsystem.id].mgmt_io_outstanding++; 725 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 726 rc = nvmf_ctrlr_cmd_connect(&req); 727 poll_threads(); 728 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 729 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 730 CU_ASSERT(qpair.ctrlr != NULL); 731 CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL); 732 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 733 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 734 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 735 free(qpair.ctrlr); 736 qpair.ctrlr = NULL; 737 cmd.connect_cmd.qid = 1; 738 cmd.connect_cmd.kato = 120000; 739 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 740 741 /* I/O connect to disabled controller */ 742 memset(&rsp, 0, sizeof(rsp)); 743 ctrlr.vcprop.cc.bits.en = 0; 744 sgroups[subsystem.id].mgmt_io_outstanding++; 745 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 746 rc = nvmf_ctrlr_cmd_connect(&req); 747 poll_threads(); 748 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 749 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 750 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 751 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 752 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 753 CU_ASSERT(qpair.ctrlr == NULL); 754 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 755 ctrlr.vcprop.cc.bits.en = 1; 756 757 /* I/O connect with invalid IOSQES */ 758 memset(&rsp, 0, sizeof(rsp)); 759 ctrlr.vcprop.cc.bits.iosqes = 3; 760 sgroups[subsystem.id].mgmt_io_outstanding++; 761 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 762 rc = nvmf_ctrlr_cmd_connect(&req); 763 poll_threads(); 764 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 765 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 766 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 767 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 768 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 769 CU_ASSERT(qpair.ctrlr == NULL); 770 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 771 ctrlr.vcprop.cc.bits.iosqes = 6; 772 773 /* I/O connect with invalid IOCQES */ 774 memset(&rsp, 0, sizeof(rsp)); 775 ctrlr.vcprop.cc.bits.iocqes = 3; 776 sgroups[subsystem.id].mgmt_io_outstanding++; 777 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 778 rc = nvmf_ctrlr_cmd_connect(&req); 779 poll_threads(); 780 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 781 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 782 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 783 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 784 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 785 CU_ASSERT(qpair.ctrlr == NULL); 786 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 787 ctrlr.vcprop.cc.bits.iocqes = 4; 788 789 /* I/O connect with too many existing qpairs */ 790 memset(&rsp, 0, sizeof(rsp)); 791 spdk_bit_array_set(ctrlr.qpair_mask, 0); 792 spdk_bit_array_set(ctrlr.qpair_mask, 1); 793 spdk_bit_array_set(ctrlr.qpair_mask, 2); 794 sgroups[subsystem.id].mgmt_io_outstanding++; 795 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 796 rc = nvmf_ctrlr_cmd_connect(&req); 797 poll_threads(); 798 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 799 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 800 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 801 CU_ASSERT(qpair.ctrlr == NULL); 802 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 803 spdk_bit_array_clear(ctrlr.qpair_mask, 0); 804 spdk_bit_array_clear(ctrlr.qpair_mask, 1); 805 spdk_bit_array_clear(ctrlr.qpair_mask, 2); 806 807 /* I/O connect with duplicate queue ID */ 808 memset(&rsp, 0, sizeof(rsp)); 809 memset(&qpair2, 0, sizeof(qpair2)); 810 qpair2.group = &group; 811 qpair2.qid = 1; 812 spdk_bit_array_set(ctrlr.qpair_mask, 1); 813 cmd.connect_cmd.qid = 1; 814 sgroups[subsystem.id].mgmt_io_outstanding++; 815 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 816 rc = nvmf_ctrlr_cmd_connect(&req); 817 poll_threads(); 818 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 819 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 820 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 821 CU_ASSERT(qpair.ctrlr == NULL); 822 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 823 824 /* I/O connect when admin qpair is being destroyed */ 825 admin_qpair.group = NULL; 826 admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING; 827 memset(&rsp, 0, sizeof(rsp)); 828 sgroups[subsystem.id].mgmt_io_outstanding++; 829 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 830 rc = nvmf_ctrlr_cmd_connect(&req); 831 poll_threads(); 832 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 833 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 834 CU_ASSERT(qpair.ctrlr == NULL); 835 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 836 admin_qpair.group = &group; 837 admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 838 839 /* Clean up globals */ 840 MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem); 841 MOCK_CLEAR(spdk_nvmf_poll_group_create); 842 843 spdk_bit_array_free(&ctrlr.qpair_mask); 844 free(sgroups); 845 } 846 847 static void 848 test_get_ns_id_desc_list(void) 849 { 850 struct spdk_nvmf_subsystem subsystem; 851 struct spdk_nvmf_qpair qpair; 852 struct spdk_nvmf_ctrlr ctrlr; 853 struct spdk_nvmf_request req; 854 struct spdk_nvmf_ns *ns_ptrs[1]; 855 struct spdk_nvmf_ns ns; 856 union nvmf_h2c_msg cmd; 857 union nvmf_c2h_msg rsp; 858 struct spdk_bdev bdev; 859 uint8_t buf[4096]; 860 861 memset(&subsystem, 0, sizeof(subsystem)); 862 ns_ptrs[0] = &ns; 863 subsystem.ns = ns_ptrs; 864 subsystem.max_nsid = 1; 865 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 866 867 memset(&ns, 0, sizeof(ns)); 868 ns.opts.nsid = 1; 869 ns.bdev = &bdev; 870 871 memset(&qpair, 0, sizeof(qpair)); 872 qpair.ctrlr = &ctrlr; 873 874 memset(&ctrlr, 0, sizeof(ctrlr)); 875 ctrlr.subsys = &subsystem; 876 ctrlr.vcprop.cc.bits.en = 1; 877 ctrlr.thread = spdk_get_thread(); 878 879 memset(&req, 0, sizeof(req)); 880 req.qpair = &qpair; 881 req.cmd = &cmd; 882 req.rsp = &rsp; 883 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 884 req.data = buf; 885 req.length = sizeof(buf); 886 887 memset(&cmd, 0, sizeof(cmd)); 888 cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY; 889 cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST; 890 891 /* Invalid NSID */ 892 cmd.nvme_cmd.nsid = 0; 893 memset(&rsp, 0, sizeof(rsp)); 894 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 895 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 896 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 897 898 /* Valid NSID, but ns has no IDs defined */ 899 cmd.nvme_cmd.nsid = 1; 900 memset(&rsp, 0, sizeof(rsp)); 901 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 902 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 903 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 904 CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf))); 905 906 /* Valid NSID, only EUI64 defined */ 907 ns.opts.eui64[0] = 0x11; 908 ns.opts.eui64[7] = 0xFF; 909 memset(&rsp, 0, sizeof(rsp)); 910 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 911 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 912 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 913 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 914 CU_ASSERT(buf[1] == 8); 915 CU_ASSERT(buf[4] == 0x11); 916 CU_ASSERT(buf[11] == 0xFF); 917 CU_ASSERT(buf[13] == 0); 918 919 /* Valid NSID, only NGUID defined */ 920 memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64)); 921 ns.opts.nguid[0] = 0x22; 922 ns.opts.nguid[15] = 0xEE; 923 memset(&rsp, 0, sizeof(rsp)); 924 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 925 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 926 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 927 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID); 928 CU_ASSERT(buf[1] == 16); 929 CU_ASSERT(buf[4] == 0x22); 930 CU_ASSERT(buf[19] == 0xEE); 931 CU_ASSERT(buf[21] == 0); 932 933 /* Valid NSID, both EUI64 and NGUID defined */ 934 ns.opts.eui64[0] = 0x11; 935 ns.opts.eui64[7] = 0xFF; 936 ns.opts.nguid[0] = 0x22; 937 ns.opts.nguid[15] = 0xEE; 938 memset(&rsp, 0, sizeof(rsp)); 939 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 940 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 941 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 942 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 943 CU_ASSERT(buf[1] == 8); 944 CU_ASSERT(buf[4] == 0x11); 945 CU_ASSERT(buf[11] == 0xFF); 946 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 947 CU_ASSERT(buf[13] == 16); 948 CU_ASSERT(buf[16] == 0x22); 949 CU_ASSERT(buf[31] == 0xEE); 950 CU_ASSERT(buf[33] == 0); 951 952 /* Valid NSID, EUI64, NGUID, and UUID defined */ 953 ns.opts.eui64[0] = 0x11; 954 ns.opts.eui64[7] = 0xFF; 955 ns.opts.nguid[0] = 0x22; 956 ns.opts.nguid[15] = 0xEE; 957 ns.opts.uuid.u.raw[0] = 0x33; 958 ns.opts.uuid.u.raw[15] = 0xDD; 959 memset(&rsp, 0, sizeof(rsp)); 960 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 961 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 962 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 963 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 964 CU_ASSERT(buf[1] == 8); 965 CU_ASSERT(buf[4] == 0x11); 966 CU_ASSERT(buf[11] == 0xFF); 967 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 968 CU_ASSERT(buf[13] == 16); 969 CU_ASSERT(buf[16] == 0x22); 970 CU_ASSERT(buf[31] == 0xEE); 971 CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID); 972 CU_ASSERT(buf[33] == 16); 973 CU_ASSERT(buf[36] == 0x33); 974 CU_ASSERT(buf[51] == 0xDD); 975 CU_ASSERT(buf[53] == 0); 976 } 977 978 static void 979 test_identify_ns(void) 980 { 981 struct spdk_nvmf_subsystem subsystem = {}; 982 struct spdk_nvmf_transport transport = {}; 983 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 984 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 985 struct spdk_nvme_cmd cmd = {}; 986 struct spdk_nvme_cpl rsp = {}; 987 struct spdk_nvme_ns_data nsdata = {}; 988 struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}}; 989 struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}}; 990 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 991 992 subsystem.ns = ns_arr; 993 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 994 995 /* Invalid NSID 0 */ 996 cmd.nsid = 0; 997 memset(&nsdata, 0, sizeof(nsdata)); 998 memset(&rsp, 0, sizeof(rsp)); 999 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1000 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1001 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1002 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1003 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1004 1005 /* Valid NSID 1 */ 1006 cmd.nsid = 1; 1007 memset(&nsdata, 0, sizeof(nsdata)); 1008 memset(&rsp, 0, sizeof(rsp)); 1009 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1010 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1011 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1012 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1013 CU_ASSERT(nsdata.nsze == 1234); 1014 1015 /* Valid but inactive NSID 2 */ 1016 cmd.nsid = 2; 1017 memset(&nsdata, 0, sizeof(nsdata)); 1018 memset(&rsp, 0, sizeof(rsp)); 1019 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1020 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1021 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1022 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1023 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1024 1025 /* Valid NSID 3 */ 1026 cmd.nsid = 3; 1027 memset(&nsdata, 0, sizeof(nsdata)); 1028 memset(&rsp, 0, sizeof(rsp)); 1029 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1030 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1031 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1032 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1033 CU_ASSERT(nsdata.nsze == 5678); 1034 1035 /* Invalid NSID 4 */ 1036 cmd.nsid = 4; 1037 memset(&nsdata, 0, sizeof(nsdata)); 1038 memset(&rsp, 0, sizeof(rsp)); 1039 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1040 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1041 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1042 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1043 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1044 1045 /* Invalid NSID 0xFFFFFFFF (NS management not supported) */ 1046 cmd.nsid = 0xFFFFFFFF; 1047 memset(&nsdata, 0, sizeof(nsdata)); 1048 memset(&rsp, 0, sizeof(rsp)); 1049 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1050 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1051 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1052 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1053 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1054 } 1055 1056 static void 1057 test_set_get_features(void) 1058 { 1059 struct spdk_nvmf_subsystem subsystem = {}; 1060 struct spdk_nvmf_qpair admin_qpair = {}; 1061 enum spdk_nvme_ana_state ana_state[3]; 1062 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 1063 struct spdk_nvmf_ctrlr ctrlr = { 1064 .subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener 1065 }; 1066 union nvmf_h2c_msg cmd = {}; 1067 union nvmf_c2h_msg rsp = {}; 1068 struct spdk_nvmf_ns ns[3]; 1069 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 1070 struct spdk_nvmf_request req; 1071 int rc; 1072 1073 ns[0].anagrpid = 1; 1074 ns[2].anagrpid = 3; 1075 subsystem.ns = ns_arr; 1076 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 1077 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1078 listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1079 admin_qpair.ctrlr = &ctrlr; 1080 req.qpair = &admin_qpair; 1081 cmd.nvme_cmd.nsid = 1; 1082 req.cmd = &cmd; 1083 req.rsp = &rsp; 1084 1085 /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 1086 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1087 cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1; 1088 ns[0].ptpl_file = "testcfg"; 1089 rc = nvmf_ctrlr_set_features_reservation_persistence(&req); 1090 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1091 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 1092 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE); 1093 CU_ASSERT(ns[0].ptpl_activated == true); 1094 1095 /* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 1096 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1097 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST; 1098 rc = nvmf_ctrlr_get_features_reservation_persistence(&req); 1099 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1100 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1101 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1102 CU_ASSERT(rsp.nvme_cpl.cdw0 == 1); 1103 1104 1105 /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */ 1106 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1107 cmd.nvme_cmd.cdw11 = 0x42; 1108 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1109 1110 rc = nvmf_ctrlr_get_features(&req); 1111 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1112 1113 /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */ 1114 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1115 cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */ 1116 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1117 1118 rc = nvmf_ctrlr_get_features(&req); 1119 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1120 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1121 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1122 1123 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */ 1124 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1125 cmd.nvme_cmd.cdw11 = 0x42; 1126 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1127 1128 rc = nvmf_ctrlr_set_features(&req); 1129 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1130 1131 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */ 1132 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1133 cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */ 1134 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1135 1136 rc = nvmf_ctrlr_set_features(&req); 1137 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1138 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1139 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1140 1141 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */ 1142 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1143 cmd.nvme_cmd.cdw11 = 0x42; 1144 cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */ 1145 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1146 1147 rc = nvmf_ctrlr_set_features(&req); 1148 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1149 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1150 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1151 1152 1153 /* get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */ 1154 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1155 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1156 1157 rc = nvmf_ctrlr_get_features(&req); 1158 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1159 1160 /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */ 1161 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1162 cmd.nvme_cmd.cdw11 = 0x42; 1163 cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1; 1164 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1165 1166 rc = nvmf_ctrlr_set_features(&req); 1167 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1168 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1169 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1170 1171 /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */ 1172 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1173 cmd.nvme_cmd.cdw11 = 0x42; 1174 cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0; 1175 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1176 1177 rc = nvmf_ctrlr_set_features(&req); 1178 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1179 } 1180 1181 /* 1182 * Reservation Unit Test Configuration 1183 * -------- -------- -------- 1184 * | Host A | | Host B | | Host C | 1185 * -------- -------- -------- 1186 * / \ | | 1187 * -------- -------- ------- ------- 1188 * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C| 1189 * -------- -------- ------- ------- 1190 * \ \ / / 1191 * \ \ / / 1192 * \ \ / / 1193 * -------------------------------------- 1194 * | NAMESPACE 1 | 1195 * -------------------------------------- 1196 */ 1197 1198 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C; 1199 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info; 1200 1201 static void 1202 ut_reservation_init(enum spdk_nvme_reservation_type rtype) 1203 { 1204 /* Host A has two controllers */ 1205 spdk_uuid_generate(&g_ctrlr1_A.hostid); 1206 spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid); 1207 1208 /* Host B has 1 controller */ 1209 spdk_uuid_generate(&g_ctrlr_B.hostid); 1210 1211 /* Host C has 1 controller */ 1212 spdk_uuid_generate(&g_ctrlr_C.hostid); 1213 1214 memset(&g_ns_info, 0, sizeof(g_ns_info)); 1215 g_ns_info.rtype = rtype; 1216 g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid; 1217 g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid; 1218 g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid; 1219 } 1220 1221 static void 1222 test_reservation_write_exclusive(void) 1223 { 1224 struct spdk_nvmf_request req = {}; 1225 union nvmf_h2c_msg cmd = {}; 1226 union nvmf_c2h_msg rsp = {}; 1227 int rc; 1228 1229 req.cmd = &cmd; 1230 req.rsp = &rsp; 1231 1232 /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */ 1233 ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE); 1234 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1235 1236 /* Test Case: Issue a Read command from Host A and Host B */ 1237 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1238 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1239 SPDK_CU_ASSERT_FATAL(rc == 0); 1240 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1241 SPDK_CU_ASSERT_FATAL(rc == 0); 1242 1243 /* Test Case: Issue a DSM Write command from Host A and Host B */ 1244 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1245 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1246 SPDK_CU_ASSERT_FATAL(rc == 0); 1247 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1248 SPDK_CU_ASSERT_FATAL(rc < 0); 1249 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1250 1251 /* Test Case: Issue a Write command from Host C */ 1252 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1253 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1254 SPDK_CU_ASSERT_FATAL(rc < 0); 1255 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1256 1257 /* Test Case: Issue a Read command from Host B */ 1258 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1259 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1260 SPDK_CU_ASSERT_FATAL(rc == 0); 1261 1262 /* Unregister Host C */ 1263 memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid)); 1264 1265 /* Test Case: Read and Write commands from non-registrant Host C */ 1266 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1267 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1268 SPDK_CU_ASSERT_FATAL(rc < 0); 1269 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1270 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1271 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1272 SPDK_CU_ASSERT_FATAL(rc == 0); 1273 } 1274 1275 static void 1276 test_reservation_exclusive_access(void) 1277 { 1278 struct spdk_nvmf_request req = {}; 1279 union nvmf_h2c_msg cmd = {}; 1280 union nvmf_c2h_msg rsp = {}; 1281 int rc; 1282 1283 req.cmd = &cmd; 1284 req.rsp = &rsp; 1285 1286 /* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */ 1287 ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS); 1288 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1289 1290 /* Test Case: Issue a Read command from Host B */ 1291 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1292 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1293 SPDK_CU_ASSERT_FATAL(rc < 0); 1294 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1295 1296 /* Test Case: Issue a Reservation Release command from a valid Registrant */ 1297 cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE; 1298 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1299 SPDK_CU_ASSERT_FATAL(rc == 0); 1300 } 1301 1302 static void 1303 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1304 { 1305 struct spdk_nvmf_request req = {}; 1306 union nvmf_h2c_msg cmd = {}; 1307 union nvmf_c2h_msg rsp = {}; 1308 int rc; 1309 1310 req.cmd = &cmd; 1311 req.rsp = &rsp; 1312 1313 /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */ 1314 ut_reservation_init(rtype); 1315 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1316 1317 /* Test Case: Issue a Read command from Host A and Host C */ 1318 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1319 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1320 SPDK_CU_ASSERT_FATAL(rc == 0); 1321 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1322 SPDK_CU_ASSERT_FATAL(rc == 0); 1323 1324 /* Test Case: Issue a DSM Write command from Host A and Host C */ 1325 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1326 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1327 SPDK_CU_ASSERT_FATAL(rc == 0); 1328 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1329 SPDK_CU_ASSERT_FATAL(rc == 0); 1330 1331 /* Unregister Host C */ 1332 memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid)); 1333 1334 /* Test Case: Read and Write commands from non-registrant Host C */ 1335 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1336 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1337 SPDK_CU_ASSERT_FATAL(rc == 0); 1338 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1339 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1340 SPDK_CU_ASSERT_FATAL(rc < 0); 1341 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1342 } 1343 1344 static void 1345 test_reservation_write_exclusive_regs_only_and_all_regs(void) 1346 { 1347 _test_reservation_write_exclusive_regs_only_and_all_regs( 1348 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1349 _test_reservation_write_exclusive_regs_only_and_all_regs( 1350 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS); 1351 } 1352 1353 static void 1354 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1355 { 1356 struct spdk_nvmf_request req = {}; 1357 union nvmf_h2c_msg cmd = {}; 1358 union nvmf_c2h_msg rsp = {}; 1359 int rc; 1360 1361 req.cmd = &cmd; 1362 req.rsp = &rsp; 1363 1364 /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */ 1365 ut_reservation_init(rtype); 1366 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1367 1368 /* Test Case: Issue a Write command from Host B */ 1369 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1370 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1371 SPDK_CU_ASSERT_FATAL(rc == 0); 1372 1373 /* Unregister Host B */ 1374 memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid)); 1375 1376 /* Test Case: Issue a Read command from Host B */ 1377 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1378 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1379 SPDK_CU_ASSERT_FATAL(rc < 0); 1380 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1381 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1382 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1383 SPDK_CU_ASSERT_FATAL(rc < 0); 1384 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1385 } 1386 1387 static void 1388 test_reservation_exclusive_access_regs_only_and_all_regs(void) 1389 { 1390 _test_reservation_exclusive_access_regs_only_and_all_regs( 1391 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY); 1392 _test_reservation_exclusive_access_regs_only_and_all_regs( 1393 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 1394 } 1395 1396 static void 1397 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1398 { 1399 STAILQ_INIT(&ctrlr->async_events); 1400 } 1401 1402 static void 1403 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1404 { 1405 struct spdk_nvmf_async_event_completion *event, *event_tmp; 1406 1407 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) { 1408 STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link); 1409 free(event); 1410 } 1411 } 1412 1413 static int 1414 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1415 { 1416 int num = 0; 1417 struct spdk_nvmf_async_event_completion *event; 1418 1419 STAILQ_FOREACH(event, &ctrlr->async_events, link) { 1420 num++; 1421 } 1422 return num; 1423 } 1424 1425 static void 1426 test_reservation_notification_log_page(void) 1427 { 1428 struct spdk_nvmf_ctrlr ctrlr; 1429 struct spdk_nvmf_qpair qpair; 1430 struct spdk_nvmf_ns ns; 1431 struct spdk_nvmf_request req = {}; 1432 union nvmf_h2c_msg cmd = {}; 1433 union nvmf_c2h_msg rsp = {}; 1434 union spdk_nvme_async_event_completion event = {}; 1435 struct spdk_nvme_reservation_notification_log logs[3]; 1436 struct iovec iov; 1437 1438 memset(&ctrlr, 0, sizeof(ctrlr)); 1439 ctrlr.thread = spdk_get_thread(); 1440 TAILQ_INIT(&ctrlr.log_head); 1441 init_pending_async_events(&ctrlr); 1442 ns.nsid = 1; 1443 1444 /* Test Case: Mask all the reservation notifications */ 1445 ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK | 1446 SPDK_NVME_RESERVATION_RELEASED_MASK | 1447 SPDK_NVME_RESERVATION_PREEMPTED_MASK; 1448 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1449 SPDK_NVME_REGISTRATION_PREEMPTED); 1450 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1451 SPDK_NVME_RESERVATION_RELEASED); 1452 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1453 SPDK_NVME_RESERVATION_PREEMPTED); 1454 poll_threads(); 1455 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head)); 1456 1457 /* Test Case: Unmask all the reservation notifications, 1458 * 3 log pages are generated, and AER was triggered. 1459 */ 1460 ns.mask = 0; 1461 ctrlr.num_avail_log_pages = 0; 1462 req.cmd = &cmd; 1463 req.rsp = &rsp; 1464 ctrlr.aer_req[0] = &req; 1465 ctrlr.nr_aer_reqs = 1; 1466 req.qpair = &qpair; 1467 TAILQ_INIT(&qpair.outstanding); 1468 qpair.ctrlr = NULL; 1469 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1470 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 1471 1472 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1473 SPDK_NVME_REGISTRATION_PREEMPTED); 1474 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1475 SPDK_NVME_RESERVATION_RELEASED); 1476 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1477 SPDK_NVME_RESERVATION_PREEMPTED); 1478 poll_threads(); 1479 event.raw = rsp.nvme_cpl.cdw0; 1480 SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO); 1481 SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL); 1482 SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION); 1483 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3); 1484 1485 /* Test Case: Get Log Page to clear the log pages */ 1486 iov.iov_base = &logs[0]; 1487 iov.iov_len = sizeof(logs); 1488 nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0); 1489 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0); 1490 1491 cleanup_pending_async_events(&ctrlr); 1492 } 1493 1494 static void 1495 test_get_dif_ctx(void) 1496 { 1497 struct spdk_nvmf_subsystem subsystem = {}; 1498 struct spdk_nvmf_request req = {}; 1499 struct spdk_nvmf_qpair qpair = {}; 1500 struct spdk_nvmf_ctrlr ctrlr = {}; 1501 struct spdk_nvmf_ns ns = {}; 1502 struct spdk_nvmf_ns *_ns = NULL; 1503 struct spdk_bdev bdev = {}; 1504 union nvmf_h2c_msg cmd = {}; 1505 struct spdk_dif_ctx dif_ctx = {}; 1506 bool ret; 1507 1508 ctrlr.subsys = &subsystem; 1509 1510 qpair.ctrlr = &ctrlr; 1511 1512 req.qpair = &qpair; 1513 req.cmd = &cmd; 1514 1515 ns.bdev = &bdev; 1516 1517 ctrlr.dif_insert_or_strip = false; 1518 1519 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1520 CU_ASSERT(ret == false); 1521 1522 ctrlr.dif_insert_or_strip = true; 1523 qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED; 1524 1525 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1526 CU_ASSERT(ret == false); 1527 1528 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1529 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 1530 1531 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1532 CU_ASSERT(ret == false); 1533 1534 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH; 1535 1536 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1537 CU_ASSERT(ret == false); 1538 1539 qpair.qid = 1; 1540 1541 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1542 CU_ASSERT(ret == false); 1543 1544 cmd.nvme_cmd.nsid = 1; 1545 1546 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1547 CU_ASSERT(ret == false); 1548 1549 subsystem.max_nsid = 1; 1550 subsystem.ns = &_ns; 1551 subsystem.ns[0] = &ns; 1552 1553 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1554 CU_ASSERT(ret == false); 1555 1556 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE; 1557 1558 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1559 CU_ASSERT(ret == true); 1560 } 1561 1562 static void 1563 test_identify_ctrlr(void) 1564 { 1565 struct spdk_nvmf_tgt tgt = {}; 1566 struct spdk_nvmf_subsystem subsystem = { 1567 .subtype = SPDK_NVMF_SUBTYPE_NVME, 1568 .tgt = &tgt, 1569 }; 1570 struct spdk_nvmf_transport_ops tops = {}; 1571 struct spdk_nvmf_transport transport = { 1572 .ops = &tops, 1573 .opts = { 1574 .in_capsule_data_size = 4096, 1575 }, 1576 }; 1577 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 1578 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 1579 struct spdk_nvme_ctrlr_data cdata = {}; 1580 uint32_t expected_ioccsz; 1581 1582 nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata); 1583 1584 /* Check ioccsz, TCP transport */ 1585 tops.type = SPDK_NVME_TRANSPORT_TCP; 1586 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1587 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1588 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1589 1590 /* Check ioccsz, RDMA transport */ 1591 tops.type = SPDK_NVME_TRANSPORT_RDMA; 1592 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1593 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1594 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1595 1596 /* Check ioccsz, TCP transport with dif_insert_or_strip */ 1597 tops.type = SPDK_NVME_TRANSPORT_TCP; 1598 ctrlr.dif_insert_or_strip = true; 1599 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1600 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1601 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1602 } 1603 1604 static int 1605 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req) 1606 { 1607 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 1608 1609 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1610 }; 1611 1612 static void 1613 test_custom_admin_cmd(void) 1614 { 1615 struct spdk_nvmf_subsystem subsystem; 1616 struct spdk_nvmf_qpair qpair; 1617 struct spdk_nvmf_ctrlr ctrlr; 1618 struct spdk_nvmf_request req; 1619 struct spdk_nvmf_ns *ns_ptrs[1]; 1620 struct spdk_nvmf_ns ns; 1621 union nvmf_h2c_msg cmd; 1622 union nvmf_c2h_msg rsp; 1623 struct spdk_bdev bdev; 1624 uint8_t buf[4096]; 1625 int rc; 1626 1627 memset(&subsystem, 0, sizeof(subsystem)); 1628 ns_ptrs[0] = &ns; 1629 subsystem.ns = ns_ptrs; 1630 subsystem.max_nsid = 1; 1631 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1632 1633 memset(&ns, 0, sizeof(ns)); 1634 ns.opts.nsid = 1; 1635 ns.bdev = &bdev; 1636 1637 memset(&qpair, 0, sizeof(qpair)); 1638 qpair.ctrlr = &ctrlr; 1639 1640 memset(&ctrlr, 0, sizeof(ctrlr)); 1641 ctrlr.subsys = &subsystem; 1642 ctrlr.vcprop.cc.bits.en = 1; 1643 ctrlr.thread = spdk_get_thread(); 1644 1645 memset(&req, 0, sizeof(req)); 1646 req.qpair = &qpair; 1647 req.cmd = &cmd; 1648 req.rsp = &rsp; 1649 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 1650 req.data = buf; 1651 req.length = sizeof(buf); 1652 1653 memset(&cmd, 0, sizeof(cmd)); 1654 cmd.nvme_cmd.opc = 0xc1; 1655 cmd.nvme_cmd.nsid = 0; 1656 memset(&rsp, 0, sizeof(rsp)); 1657 1658 spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr); 1659 1660 /* Ensure that our hdlr is being called */ 1661 rc = nvmf_ctrlr_process_admin_cmd(&req); 1662 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1663 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1664 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1665 } 1666 1667 static void 1668 test_fused_compare_and_write(void) 1669 { 1670 struct spdk_nvmf_request req = {}; 1671 struct spdk_nvmf_qpair qpair = {}; 1672 struct spdk_nvme_cmd cmd = {}; 1673 union nvmf_c2h_msg rsp = {}; 1674 struct spdk_nvmf_ctrlr ctrlr = {}; 1675 struct spdk_nvmf_subsystem subsystem = {}; 1676 struct spdk_nvmf_ns ns = {}; 1677 struct spdk_nvmf_ns *subsys_ns[1] = {}; 1678 enum spdk_nvme_ana_state ana_state[1]; 1679 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 1680 struct spdk_bdev bdev = {}; 1681 1682 struct spdk_nvmf_poll_group group = {}; 1683 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1684 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 1685 struct spdk_io_channel io_ch = {}; 1686 1687 ns.bdev = &bdev; 1688 ns.anagrpid = 1; 1689 1690 subsystem.id = 0; 1691 subsystem.max_nsid = 1; 1692 subsys_ns[0] = &ns; 1693 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 1694 1695 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1696 1697 /* Enable controller */ 1698 ctrlr.vcprop.cc.bits.en = 1; 1699 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 1700 ctrlr.listener = &listener; 1701 1702 group.num_sgroups = 1; 1703 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1704 sgroups.num_ns = 1; 1705 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1706 ns_info.channel = &io_ch; 1707 sgroups.ns_info = &ns_info; 1708 TAILQ_INIT(&sgroups.queued); 1709 group.sgroups = &sgroups; 1710 TAILQ_INIT(&qpair.outstanding); 1711 1712 qpair.ctrlr = &ctrlr; 1713 qpair.group = &group; 1714 qpair.qid = 1; 1715 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1716 1717 cmd.nsid = 1; 1718 1719 req.qpair = &qpair; 1720 req.cmd = (union nvmf_h2c_msg *)&cmd; 1721 req.rsp = &rsp; 1722 1723 /* SUCCESS/SUCCESS */ 1724 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1725 cmd.opc = SPDK_NVME_OPC_COMPARE; 1726 1727 spdk_nvmf_request_exec(&req); 1728 CU_ASSERT(qpair.first_fused_req != NULL); 1729 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1730 1731 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1732 cmd.opc = SPDK_NVME_OPC_WRITE; 1733 1734 spdk_nvmf_request_exec(&req); 1735 CU_ASSERT(qpair.first_fused_req == NULL); 1736 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1737 1738 /* Wrong sequence */ 1739 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1740 cmd.opc = SPDK_NVME_OPC_WRITE; 1741 1742 spdk_nvmf_request_exec(&req); 1743 CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status)); 1744 CU_ASSERT(qpair.first_fused_req == NULL); 1745 1746 /* Write as FUSE_FIRST (Wrong op code) */ 1747 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1748 cmd.opc = SPDK_NVME_OPC_WRITE; 1749 1750 spdk_nvmf_request_exec(&req); 1751 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 1752 CU_ASSERT(qpair.first_fused_req == NULL); 1753 1754 /* Compare as FUSE_SECOND (Wrong op code) */ 1755 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1756 cmd.opc = SPDK_NVME_OPC_COMPARE; 1757 1758 spdk_nvmf_request_exec(&req); 1759 CU_ASSERT(qpair.first_fused_req != NULL); 1760 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1761 1762 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1763 cmd.opc = SPDK_NVME_OPC_COMPARE; 1764 1765 spdk_nvmf_request_exec(&req); 1766 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 1767 CU_ASSERT(qpair.first_fused_req == NULL); 1768 } 1769 1770 static void 1771 test_multi_async_event_reqs(void) 1772 { 1773 struct spdk_nvmf_subsystem subsystem = {}; 1774 struct spdk_nvmf_qpair qpair = {}; 1775 struct spdk_nvmf_ctrlr ctrlr = {}; 1776 struct spdk_nvmf_request req[5] = {}; 1777 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 1778 struct spdk_nvmf_ns ns = {}; 1779 union nvmf_h2c_msg cmd[5] = {}; 1780 union nvmf_c2h_msg rsp[5] = {}; 1781 1782 struct spdk_nvmf_poll_group group = {}; 1783 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1784 1785 int i; 1786 1787 ns_ptrs[0] = &ns; 1788 subsystem.ns = ns_ptrs; 1789 subsystem.max_nsid = 1; 1790 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1791 1792 ns.opts.nsid = 1; 1793 group.sgroups = &sgroups; 1794 1795 qpair.ctrlr = &ctrlr; 1796 qpair.group = &group; 1797 TAILQ_INIT(&qpair.outstanding); 1798 1799 ctrlr.subsys = &subsystem; 1800 ctrlr.vcprop.cc.bits.en = 1; 1801 ctrlr.thread = spdk_get_thread(); 1802 1803 for (i = 0; i < 5; i++) { 1804 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 1805 cmd[i].nvme_cmd.nsid = 1; 1806 cmd[i].nvme_cmd.cid = i; 1807 1808 req[i].qpair = &qpair; 1809 req[i].cmd = &cmd[i]; 1810 req[i].rsp = &rsp[i]; 1811 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 1812 } 1813 1814 /* Target can store NVMF_MAX_ASYNC_EVENTS reqs */ 1815 sgroups.mgmt_io_outstanding = NVMF_MAX_ASYNC_EVENTS; 1816 for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) { 1817 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 1818 CU_ASSERT(ctrlr.nr_aer_reqs == i + 1); 1819 } 1820 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 1821 1822 /* Exceeding the NVMF_MAX_ASYNC_EVENTS reports error */ 1823 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1824 CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS); 1825 CU_ASSERT(rsp[4].nvme_cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC); 1826 CU_ASSERT(rsp[4].nvme_cpl.status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED); 1827 1828 /* Test if the aer_reqs keep continuous when abort a req in the middle */ 1829 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true); 1830 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 1831 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 1832 CU_ASSERT(ctrlr.aer_req[2] == &req[3]); 1833 1834 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true); 1835 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 1836 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 1837 CU_ASSERT(ctrlr.aer_req[2] == NULL); 1838 CU_ASSERT(ctrlr.nr_aer_reqs == 2); 1839 1840 TAILQ_REMOVE(&qpair.outstanding, &req[0], link); 1841 TAILQ_REMOVE(&qpair.outstanding, &req[1], link); 1842 } 1843 1844 static void 1845 test_get_ana_log_page_one_ns_per_anagrp(void) 1846 { 1847 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t)) 1848 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE) 1849 uint32_t ana_group[3]; 1850 struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group }; 1851 struct spdk_nvmf_ctrlr ctrlr = {}; 1852 enum spdk_nvme_ana_state ana_state[3]; 1853 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 1854 struct spdk_nvmf_ns ns[3]; 1855 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]}; 1856 uint64_t offset; 1857 uint32_t length; 1858 int i; 1859 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 1860 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 1861 struct iovec iov, iovs[2]; 1862 struct spdk_nvme_ana_page *ana_hdr; 1863 char _ana_desc[UT_ANA_DESC_SIZE]; 1864 struct spdk_nvme_ana_group_descriptor *ana_desc; 1865 1866 subsystem.ns = ns_arr; 1867 subsystem.max_nsid = 3; 1868 for (i = 0; i < 3; i++) { 1869 subsystem.ana_group[i] = 1; 1870 } 1871 ctrlr.subsys = &subsystem; 1872 ctrlr.listener = &listener; 1873 1874 for (i = 0; i < 3; i++) { 1875 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1876 } 1877 1878 for (i = 0; i < 3; i++) { 1879 ns_arr[i]->nsid = i + 1; 1880 ns_arr[i]->anagrpid = i + 1; 1881 } 1882 1883 /* create expected page */ 1884 ana_hdr = (void *)&expected_page[0]; 1885 ana_hdr->num_ana_group_desc = 3; 1886 ana_hdr->change_count = 0; 1887 1888 /* descriptor may be unaligned. So create data and then copy it to the location. */ 1889 ana_desc = (void *)_ana_desc; 1890 offset = sizeof(struct spdk_nvme_ana_page); 1891 1892 for (i = 0; i < 3; i++) { 1893 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 1894 ana_desc->ana_group_id = ns_arr[i]->nsid; 1895 ana_desc->num_of_nsid = 1; 1896 ana_desc->change_count = 0; 1897 ana_desc->ana_state = ctrlr.listener->ana_state[i]; 1898 ana_desc->nsid[0] = ns_arr[i]->nsid; 1899 memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE); 1900 offset += UT_ANA_DESC_SIZE; 1901 } 1902 1903 /* read entire actual log page */ 1904 offset = 0; 1905 while (offset < UT_ANA_LOG_PAGE_SIZE) { 1906 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 1907 iov.iov_base = &actual_page[offset]; 1908 iov.iov_len = length; 1909 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 1910 offset += length; 1911 } 1912 1913 /* compare expected page and actual page */ 1914 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 1915 1916 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 1917 offset = 0; 1918 iovs[0].iov_base = &actual_page[offset]; 1919 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 1920 offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 1921 iovs[1].iov_base = &actual_page[offset]; 1922 iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset; 1923 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 1924 1925 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 1926 1927 #undef UT_ANA_DESC_SIZE 1928 #undef UT_ANA_LOG_PAGE_SIZE 1929 } 1930 1931 static void 1932 test_get_ana_log_page_multi_ns_per_anagrp(void) 1933 { 1934 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + \ 1935 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 + \ 1936 sizeof(uint32_t) * 5) 1937 struct spdk_nvmf_ns ns[5]; 1938 struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]}; 1939 uint32_t ana_group[5] = {0}; 1940 struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, }; 1941 enum spdk_nvme_ana_state ana_state[5]; 1942 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, }; 1943 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, }; 1944 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 1945 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 1946 struct iovec iov, iovs[2]; 1947 struct spdk_nvme_ana_page *ana_hdr; 1948 char _ana_desc[UT_ANA_LOG_PAGE_SIZE]; 1949 struct spdk_nvme_ana_group_descriptor *ana_desc; 1950 uint64_t offset; 1951 uint32_t length; 1952 int i; 1953 1954 subsystem.max_nsid = 5; 1955 subsystem.ana_group[1] = 3; 1956 subsystem.ana_group[2] = 2; 1957 for (i = 0; i < 5; i++) { 1958 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1959 } 1960 1961 for (i = 0; i < 5; i++) { 1962 ns_arr[i]->nsid = i + 1; 1963 } 1964 ns_arr[0]->anagrpid = 2; 1965 ns_arr[1]->anagrpid = 3; 1966 ns_arr[2]->anagrpid = 2; 1967 ns_arr[3]->anagrpid = 3; 1968 ns_arr[4]->anagrpid = 2; 1969 1970 /* create expected page */ 1971 ana_hdr = (void *)&expected_page[0]; 1972 ana_hdr->num_ana_group_desc = 2; 1973 ana_hdr->change_count = 0; 1974 1975 /* descriptor may be unaligned. So create data and then copy it to the location. */ 1976 ana_desc = (void *)_ana_desc; 1977 offset = sizeof(struct spdk_nvme_ana_page); 1978 1979 memset(_ana_desc, 0, sizeof(_ana_desc)); 1980 ana_desc->ana_group_id = 2; 1981 ana_desc->num_of_nsid = 3; 1982 ana_desc->change_count = 0; 1983 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1984 ana_desc->nsid[0] = 1; 1985 ana_desc->nsid[1] = 3; 1986 ana_desc->nsid[2] = 5; 1987 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 1988 sizeof(uint32_t) * 3); 1989 offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3; 1990 1991 memset(_ana_desc, 0, sizeof(_ana_desc)); 1992 ana_desc->ana_group_id = 3; 1993 ana_desc->num_of_nsid = 2; 1994 ana_desc->change_count = 0; 1995 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 1996 ana_desc->nsid[0] = 2; 1997 ana_desc->nsid[1] = 4; 1998 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 1999 sizeof(uint32_t) * 2); 2000 2001 /* read entire actual log page, and compare expected page and actual page. */ 2002 offset = 0; 2003 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2004 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2005 iov.iov_base = &actual_page[offset]; 2006 iov.iov_len = length; 2007 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2008 offset += length; 2009 } 2010 2011 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2012 2013 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2014 offset = 0; 2015 iovs[0].iov_base = &actual_page[offset]; 2016 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2017 offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2018 iovs[1].iov_base = &actual_page[offset]; 2019 iovs[1].iov_len = sizeof(uint32_t) * 5; 2020 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2021 2022 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2023 2024 #undef UT_ANA_LOG_PAGE_SIZE 2025 } 2026 static void 2027 test_multi_async_events(void) 2028 { 2029 struct spdk_nvmf_subsystem subsystem = {}; 2030 struct spdk_nvmf_qpair qpair = {}; 2031 struct spdk_nvmf_ctrlr ctrlr = {}; 2032 struct spdk_nvmf_request req[4] = {}; 2033 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2034 struct spdk_nvmf_ns ns = {}; 2035 union nvmf_h2c_msg cmd[4] = {}; 2036 union nvmf_c2h_msg rsp[4] = {}; 2037 union spdk_nvme_async_event_completion event = {}; 2038 struct spdk_nvmf_poll_group group = {}; 2039 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2040 int i; 2041 2042 ns_ptrs[0] = &ns; 2043 subsystem.ns = ns_ptrs; 2044 subsystem.max_nsid = 1; 2045 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2046 2047 ns.opts.nsid = 1; 2048 group.sgroups = &sgroups; 2049 2050 qpair.ctrlr = &ctrlr; 2051 qpair.group = &group; 2052 TAILQ_INIT(&qpair.outstanding); 2053 2054 ctrlr.subsys = &subsystem; 2055 ctrlr.vcprop.cc.bits.en = 1; 2056 ctrlr.thread = spdk_get_thread(); 2057 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2058 ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1; 2059 ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1; 2060 init_pending_async_events(&ctrlr); 2061 2062 /* Target queue pending events when there is no outstanding AER request */ 2063 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2064 nvmf_ctrlr_async_event_ana_change_notice(&ctrlr); 2065 nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr); 2066 2067 for (i = 0; i < 4; i++) { 2068 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2069 cmd[i].nvme_cmd.nsid = 1; 2070 cmd[i].nvme_cmd.cid = i; 2071 2072 req[i].qpair = &qpair; 2073 req[i].cmd = &cmd[i]; 2074 req[i].rsp = &rsp[i]; 2075 2076 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 2077 2078 sgroups.mgmt_io_outstanding = 1; 2079 if (i < 3) { 2080 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2081 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2082 CU_ASSERT(ctrlr.nr_aer_reqs == 0); 2083 } else { 2084 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 2085 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2086 CU_ASSERT(ctrlr.nr_aer_reqs == 1); 2087 } 2088 } 2089 2090 event.raw = rsp[0].nvme_cpl.cdw0; 2091 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2092 event.raw = rsp[1].nvme_cpl.cdw0; 2093 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE); 2094 event.raw = rsp[2].nvme_cpl.cdw0; 2095 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE); 2096 2097 cleanup_pending_async_events(&ctrlr); 2098 } 2099 2100 static void 2101 test_rae(void) 2102 { 2103 struct spdk_nvmf_subsystem subsystem = {}; 2104 struct spdk_nvmf_qpair qpair = {}; 2105 struct spdk_nvmf_ctrlr ctrlr = {}; 2106 struct spdk_nvmf_request req[3] = {}; 2107 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2108 struct spdk_nvmf_ns ns = {}; 2109 union nvmf_h2c_msg cmd[3] = {}; 2110 union nvmf_c2h_msg rsp[3] = {}; 2111 union spdk_nvme_async_event_completion event = {}; 2112 struct spdk_nvmf_poll_group group = {}; 2113 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2114 int i; 2115 char data[4096]; 2116 2117 ns_ptrs[0] = &ns; 2118 subsystem.ns = ns_ptrs; 2119 subsystem.max_nsid = 1; 2120 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2121 2122 ns.opts.nsid = 1; 2123 group.sgroups = &sgroups; 2124 2125 qpair.ctrlr = &ctrlr; 2126 qpair.group = &group; 2127 TAILQ_INIT(&qpair.outstanding); 2128 2129 ctrlr.subsys = &subsystem; 2130 ctrlr.vcprop.cc.bits.en = 1; 2131 ctrlr.thread = spdk_get_thread(); 2132 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2133 init_pending_async_events(&ctrlr); 2134 2135 /* Target queue pending events when there is no outstanding AER request */ 2136 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2137 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2138 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2139 /* only one event will be queued before RAE is clear */ 2140 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2141 2142 req[0].qpair = &qpair; 2143 req[0].cmd = &cmd[0]; 2144 req[0].rsp = &rsp[0]; 2145 cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2146 cmd[0].nvme_cmd.nsid = 1; 2147 cmd[0].nvme_cmd.cid = 0; 2148 2149 for (i = 1; i < 3; i++) { 2150 req[i].qpair = &qpair; 2151 req[i].cmd = &cmd[i]; 2152 req[i].rsp = &rsp[i]; 2153 req[i].data = &data; 2154 req[i].length = sizeof(data); 2155 2156 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 2157 cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid = 2158 SPDK_NVME_LOG_CHANGED_NS_LIST; 2159 cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl = 2160 spdk_nvme_bytes_to_numd(req[i].length); 2161 cmd[i].nvme_cmd.cid = i; 2162 } 2163 cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1; 2164 cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0; 2165 2166 /* consume the pending event */ 2167 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link); 2168 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2169 event.raw = rsp[0].nvme_cpl.cdw0; 2170 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2171 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2172 2173 /* get log with RAE set */ 2174 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2175 CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2176 CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2177 2178 /* will not generate new event until RAE is clear */ 2179 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2180 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2181 2182 /* get log with RAE clear */ 2183 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2184 CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2185 CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2186 2187 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2188 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2189 2190 cleanup_pending_async_events(&ctrlr); 2191 } 2192 2193 static void 2194 test_nvmf_ctrlr_create_destruct(void) 2195 { 2196 struct spdk_nvmf_fabric_connect_data connect_data = {}; 2197 struct spdk_nvmf_poll_group group = {}; 2198 struct spdk_nvmf_subsystem_poll_group sgroups[2] = {}; 2199 struct spdk_nvmf_transport transport = {}; 2200 struct spdk_nvmf_transport_ops tops = {}; 2201 struct spdk_nvmf_subsystem subsystem = {}; 2202 struct spdk_nvmf_request req = {}; 2203 struct spdk_nvmf_qpair qpair = {}; 2204 struct spdk_nvmf_ctrlr *ctrlr = NULL; 2205 struct spdk_nvmf_tgt tgt = {}; 2206 union nvmf_h2c_msg cmd = {}; 2207 union nvmf_c2h_msg rsp = {}; 2208 const uint8_t hostid[16] = { 2209 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2210 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 2211 }; 2212 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 2213 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 2214 2215 group.thread = spdk_get_thread(); 2216 transport.ops = &tops; 2217 transport.opts.max_aq_depth = 32; 2218 transport.opts.max_queue_depth = 64; 2219 transport.opts.max_qpairs_per_ctrlr = 3; 2220 transport.opts.dif_insert_or_strip = true; 2221 transport.tgt = &tgt; 2222 qpair.transport = &transport; 2223 qpair.group = &group; 2224 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2225 TAILQ_INIT(&qpair.outstanding); 2226 2227 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 2228 connect_data.cntlid = 0xFFFF; 2229 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 2230 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 2231 2232 subsystem.thread = spdk_get_thread(); 2233 subsystem.id = 1; 2234 TAILQ_INIT(&subsystem.ctrlrs); 2235 subsystem.tgt = &tgt; 2236 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2237 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2238 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 2239 2240 group.sgroups = sgroups; 2241 2242 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 2243 cmd.connect_cmd.cid = 1; 2244 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 2245 cmd.connect_cmd.recfmt = 0; 2246 cmd.connect_cmd.qid = 0; 2247 cmd.connect_cmd.sqsize = 31; 2248 cmd.connect_cmd.cattr = 0; 2249 cmd.connect_cmd.kato = 120000; 2250 2251 req.qpair = &qpair; 2252 req.length = sizeof(connect_data); 2253 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 2254 req.data = &connect_data; 2255 req.cmd = &cmd; 2256 req.rsp = &rsp; 2257 2258 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 2259 sgroups[subsystem.id].mgmt_io_outstanding++; 2260 2261 ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.data); 2262 poll_threads(); 2263 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2264 CU_ASSERT(req.qpair->ctrlr == ctrlr); 2265 CU_ASSERT(ctrlr->subsys == &subsystem); 2266 CU_ASSERT(ctrlr->thread == req.qpair->group->thread); 2267 CU_ASSERT(ctrlr->disconnect_in_progress == false); 2268 CU_ASSERT(ctrlr->qpair_mask != NULL); 2269 CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000); 2270 CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1); 2271 CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1); 2272 CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1); 2273 CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1); 2274 CU_ASSERT(!strncmp((void *)&ctrlr->hostid, hostid, 16)); 2275 CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1); 2276 CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63); 2277 CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0); 2278 CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500); 2279 CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0); 2280 CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM); 2281 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0); 2282 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0); 2283 CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1); 2284 CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3); 2285 CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0); 2286 CU_ASSERT(ctrlr->vcprop.cc.raw == 0); 2287 CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0); 2288 CU_ASSERT(ctrlr->vcprop.csts.raw == 0); 2289 CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0); 2290 CU_ASSERT(ctrlr->dif_insert_or_strip == true); 2291 2292 ctrlr->in_destruct = true; 2293 nvmf_ctrlr_destruct(ctrlr); 2294 poll_threads(); 2295 CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs)); 2296 CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding)); 2297 } 2298 2299 static void 2300 test_nvmf_ctrlr_use_zcopy(void) 2301 { 2302 struct spdk_nvmf_subsystem subsystem = {}; 2303 struct spdk_nvmf_transport transport = {}; 2304 struct spdk_nvmf_request req = {}; 2305 struct spdk_nvmf_qpair qpair = {}; 2306 struct spdk_nvmf_ctrlr ctrlr = {}; 2307 union nvmf_h2c_msg cmd = {}; 2308 struct spdk_nvmf_ns ns = {}; 2309 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2310 struct spdk_bdev bdev = {}; 2311 struct spdk_nvmf_poll_group group = {}; 2312 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2313 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2314 struct spdk_io_channel io_ch = {}; 2315 int opc; 2316 2317 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2318 ns.bdev = &bdev; 2319 2320 subsystem.id = 0; 2321 subsystem.max_nsid = 1; 2322 subsys_ns[0] = &ns; 2323 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2324 2325 ctrlr.subsys = &subsystem; 2326 2327 transport.opts.zcopy = true; 2328 2329 qpair.ctrlr = &ctrlr; 2330 qpair.group = &group; 2331 qpair.qid = 1; 2332 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2333 qpair.transport = &transport; 2334 2335 group.thread = spdk_get_thread(); 2336 group.num_sgroups = 1; 2337 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2338 sgroups.num_ns = 1; 2339 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2340 ns_info.channel = &io_ch; 2341 sgroups.ns_info = &ns_info; 2342 TAILQ_INIT(&sgroups.queued); 2343 group.sgroups = &sgroups; 2344 TAILQ_INIT(&qpair.outstanding); 2345 2346 req.qpair = &qpair; 2347 req.cmd = &cmd; 2348 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2349 2350 /* Admin queue */ 2351 qpair.qid = 0; 2352 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2353 qpair.qid = 1; 2354 2355 /* Invalid Opcodes */ 2356 for (opc = 0; opc <= 255; opc++) { 2357 cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc; 2358 if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) && 2359 (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) { 2360 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2361 } 2362 } 2363 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 2364 2365 /* Fused WRITE */ 2366 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2367 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2368 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE; 2369 2370 /* Non bdev */ 2371 cmd.nvme_cmd.nsid = 4; 2372 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2373 cmd.nvme_cmd.nsid = 1; 2374 2375 /* ZCOPY Not supported */ 2376 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2377 ns.zcopy = true; 2378 2379 /* ZCOPY disabled on transport level */ 2380 transport.opts.zcopy = false; 2381 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2382 transport.opts.zcopy = true; 2383 2384 /* Success */ 2385 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2386 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2387 } 2388 2389 static void 2390 qpair_state_change_done(void *cb_arg, int status) 2391 { 2392 } 2393 2394 static void 2395 test_spdk_nvmf_request_zcopy_start(void) 2396 { 2397 struct spdk_nvmf_request req = {}; 2398 struct spdk_nvmf_qpair qpair = {}; 2399 struct spdk_nvmf_transport transport = {}; 2400 struct spdk_nvme_cmd cmd = {}; 2401 union nvmf_c2h_msg rsp = {}; 2402 struct spdk_nvmf_ctrlr ctrlr = {}; 2403 struct spdk_nvmf_subsystem subsystem = {}; 2404 struct spdk_nvmf_ns ns = {}; 2405 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2406 enum spdk_nvme_ana_state ana_state[1]; 2407 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2408 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2409 2410 struct spdk_nvmf_poll_group group = {}; 2411 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2412 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2413 struct spdk_io_channel io_ch = {}; 2414 2415 ns.bdev = &bdev; 2416 ns.zcopy = true; 2417 ns.anagrpid = 1; 2418 2419 subsystem.id = 0; 2420 subsystem.max_nsid = 1; 2421 subsys_ns[0] = &ns; 2422 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2423 2424 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2425 2426 /* Enable controller */ 2427 ctrlr.vcprop.cc.bits.en = 1; 2428 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2429 ctrlr.listener = &listener; 2430 2431 transport.opts.zcopy = true; 2432 2433 group.thread = spdk_get_thread(); 2434 group.num_sgroups = 1; 2435 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2436 sgroups.num_ns = 1; 2437 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2438 ns_info.channel = &io_ch; 2439 sgroups.ns_info = &ns_info; 2440 TAILQ_INIT(&sgroups.queued); 2441 group.sgroups = &sgroups; 2442 TAILQ_INIT(&qpair.outstanding); 2443 2444 qpair.ctrlr = &ctrlr; 2445 qpair.group = &group; 2446 qpair.transport = &transport; 2447 qpair.qid = 1; 2448 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2449 2450 cmd.nsid = 1; 2451 2452 req.qpair = &qpair; 2453 req.cmd = (union nvmf_h2c_msg *)&cmd; 2454 req.rsp = &rsp; 2455 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2456 cmd.opc = SPDK_NVME_OPC_READ; 2457 2458 /* Fail because no controller */ 2459 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2460 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2461 qpair.ctrlr = NULL; 2462 spdk_nvmf_request_zcopy_start(&req); 2463 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2464 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2465 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 2466 qpair.ctrlr = &ctrlr; 2467 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2468 2469 /* Fail because bad NSID */ 2470 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2471 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2472 cmd.nsid = 0; 2473 spdk_nvmf_request_zcopy_start(&req); 2474 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2475 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2476 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2477 cmd.nsid = 1; 2478 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2479 2480 /* Fail because bad Channel */ 2481 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2482 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2483 ns_info.channel = NULL; 2484 spdk_nvmf_request_zcopy_start(&req); 2485 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2486 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2487 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2488 ns_info.channel = &io_ch; 2489 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2490 2491 /* Queue the requet because NSID is not active */ 2492 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2493 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2494 ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING; 2495 spdk_nvmf_request_zcopy_start(&req); 2496 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT); 2497 CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req); 2498 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2499 TAILQ_REMOVE(&sgroups.queued, &req, link); 2500 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2501 2502 /* Fail because QPair is not active */ 2503 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2504 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2505 qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING; 2506 qpair.state_cb = qpair_state_change_done; 2507 spdk_nvmf_request_zcopy_start(&req); 2508 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED); 2509 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2510 qpair.state_cb = NULL; 2511 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2512 2513 /* Fail because nvmf_bdev_ctrlr_zcopy_start fails */ 2514 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2515 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2516 cmd.cdw10 = bdev.blockcnt; /* SLBA: CDW10 and CDW11 */ 2517 cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */ 2518 req.length = (cmd.cdw12 + 1) * bdev.blocklen; 2519 spdk_nvmf_request_zcopy_start(&req); 2520 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2521 cmd.cdw10 = 0; 2522 cmd.cdw12 = 0; 2523 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2524 2525 /* Success */ 2526 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2527 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2528 spdk_nvmf_request_zcopy_start(&req); 2529 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2530 } 2531 2532 static void 2533 test_zcopy_read(void) 2534 { 2535 struct spdk_nvmf_request req = {}; 2536 struct spdk_nvmf_qpair qpair = {}; 2537 struct spdk_nvmf_transport transport = {}; 2538 struct spdk_nvme_cmd cmd = {}; 2539 union nvmf_c2h_msg rsp = {}; 2540 struct spdk_nvmf_ctrlr ctrlr = {}; 2541 struct spdk_nvmf_subsystem subsystem = {}; 2542 struct spdk_nvmf_ns ns = {}; 2543 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2544 enum spdk_nvme_ana_state ana_state[1]; 2545 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2546 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2547 2548 struct spdk_nvmf_poll_group group = {}; 2549 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2550 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2551 struct spdk_io_channel io_ch = {}; 2552 2553 ns.bdev = &bdev; 2554 ns.zcopy = true; 2555 ns.anagrpid = 1; 2556 2557 subsystem.id = 0; 2558 subsystem.max_nsid = 1; 2559 subsys_ns[0] = &ns; 2560 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2561 2562 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2563 2564 /* Enable controller */ 2565 ctrlr.vcprop.cc.bits.en = 1; 2566 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2567 ctrlr.listener = &listener; 2568 2569 transport.opts.zcopy = true; 2570 2571 group.thread = spdk_get_thread(); 2572 group.num_sgroups = 1; 2573 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2574 sgroups.num_ns = 1; 2575 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2576 ns_info.channel = &io_ch; 2577 sgroups.ns_info = &ns_info; 2578 TAILQ_INIT(&sgroups.queued); 2579 group.sgroups = &sgroups; 2580 TAILQ_INIT(&qpair.outstanding); 2581 2582 qpair.ctrlr = &ctrlr; 2583 qpair.group = &group; 2584 qpair.transport = &transport; 2585 qpair.qid = 1; 2586 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2587 2588 cmd.nsid = 1; 2589 2590 req.qpair = &qpair; 2591 req.cmd = (union nvmf_h2c_msg *)&cmd; 2592 req.rsp = &rsp; 2593 cmd.opc = SPDK_NVME_OPC_READ; 2594 2595 /* Prepare for zcopy */ 2596 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2597 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2598 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2599 CU_ASSERT(ns_info.io_outstanding == 0); 2600 2601 /* Perform the zcopy start */ 2602 spdk_nvmf_request_zcopy_start(&req); 2603 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2604 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read); 2605 CU_ASSERT(qpair.outstanding.tqh_first == &req); 2606 CU_ASSERT(ns_info.io_outstanding == 1); 2607 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2608 2609 /* Perform the zcopy end */ 2610 spdk_nvmf_request_zcopy_end(&req, false); 2611 CU_ASSERT(req.zcopy_bdev_io == NULL); 2612 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 2613 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2614 CU_ASSERT(ns_info.io_outstanding == 0); 2615 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2616 } 2617 2618 static void 2619 test_zcopy_write(void) 2620 { 2621 struct spdk_nvmf_request req = {}; 2622 struct spdk_nvmf_qpair qpair = {}; 2623 struct spdk_nvmf_transport transport = {}; 2624 struct spdk_nvme_cmd cmd = {}; 2625 union nvmf_c2h_msg rsp = {}; 2626 struct spdk_nvmf_ctrlr ctrlr = {}; 2627 struct spdk_nvmf_subsystem subsystem = {}; 2628 struct spdk_nvmf_ns ns = {}; 2629 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2630 enum spdk_nvme_ana_state ana_state[1]; 2631 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2632 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2633 2634 struct spdk_nvmf_poll_group group = {}; 2635 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2636 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2637 struct spdk_io_channel io_ch = {}; 2638 2639 ns.bdev = &bdev; 2640 ns.zcopy = true; 2641 ns.anagrpid = 1; 2642 2643 subsystem.id = 0; 2644 subsystem.max_nsid = 1; 2645 subsys_ns[0] = &ns; 2646 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2647 2648 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2649 2650 /* Enable controller */ 2651 ctrlr.vcprop.cc.bits.en = 1; 2652 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2653 ctrlr.listener = &listener; 2654 2655 transport.opts.zcopy = true; 2656 2657 group.thread = spdk_get_thread(); 2658 group.num_sgroups = 1; 2659 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2660 sgroups.num_ns = 1; 2661 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2662 ns_info.channel = &io_ch; 2663 sgroups.ns_info = &ns_info; 2664 TAILQ_INIT(&sgroups.queued); 2665 group.sgroups = &sgroups; 2666 TAILQ_INIT(&qpair.outstanding); 2667 2668 qpair.ctrlr = &ctrlr; 2669 qpair.group = &group; 2670 qpair.transport = &transport; 2671 qpair.qid = 1; 2672 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2673 2674 cmd.nsid = 1; 2675 2676 req.qpair = &qpair; 2677 req.cmd = (union nvmf_h2c_msg *)&cmd; 2678 req.rsp = &rsp; 2679 cmd.opc = SPDK_NVME_OPC_WRITE; 2680 2681 /* Prepare for zcopy */ 2682 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2683 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2684 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2685 CU_ASSERT(ns_info.io_outstanding == 0); 2686 2687 /* Perform the zcopy start */ 2688 spdk_nvmf_request_zcopy_start(&req); 2689 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2690 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write); 2691 CU_ASSERT(qpair.outstanding.tqh_first == &req); 2692 CU_ASSERT(ns_info.io_outstanding == 1); 2693 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2694 2695 /* Perform the zcopy end */ 2696 spdk_nvmf_request_zcopy_end(&req, true); 2697 CU_ASSERT(req.zcopy_bdev_io == NULL); 2698 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 2699 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2700 CU_ASSERT(ns_info.io_outstanding == 0); 2701 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2702 } 2703 2704 static void 2705 test_nvmf_property_set(void) 2706 { 2707 int rc; 2708 struct spdk_nvmf_request req = {}; 2709 struct spdk_nvmf_qpair qpair = {}; 2710 struct spdk_nvmf_ctrlr ctrlr = {}; 2711 union nvmf_h2c_msg cmd = {}; 2712 union nvmf_c2h_msg rsp = {}; 2713 2714 req.qpair = &qpair; 2715 qpair.ctrlr = &ctrlr; 2716 req.cmd = &cmd; 2717 req.rsp = &rsp; 2718 2719 /* Invalid parameters */ 2720 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 2721 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs); 2722 2723 rc = nvmf_property_set(&req); 2724 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2725 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 2726 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 2727 2728 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms); 2729 2730 rc = nvmf_property_get(&req); 2731 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2732 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 2733 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 2734 2735 /* Set cc with same property size */ 2736 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 2737 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc); 2738 2739 rc = nvmf_property_set(&req); 2740 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2741 2742 /* Emulate cc data */ 2743 ctrlr.vcprop.cc.raw = 0xDEADBEEF; 2744 2745 rc = nvmf_property_get(&req); 2746 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2747 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF); 2748 2749 /* Set asq with different property size */ 2750 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 2751 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 2752 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq); 2753 2754 rc = nvmf_property_set(&req); 2755 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2756 2757 /* Emulate asq data */ 2758 ctrlr.vcprop.asq = 0xAADDADBEEF; 2759 2760 rc = nvmf_property_get(&req); 2761 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2762 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF); 2763 } 2764 2765 int main(int argc, char **argv) 2766 { 2767 CU_pSuite suite = NULL; 2768 unsigned int num_failures; 2769 2770 CU_set_error_action(CUEA_ABORT); 2771 CU_initialize_registry(); 2772 2773 suite = CU_add_suite("nvmf", NULL, NULL); 2774 CU_ADD_TEST(suite, test_get_log_page); 2775 CU_ADD_TEST(suite, test_process_fabrics_cmd); 2776 CU_ADD_TEST(suite, test_connect); 2777 CU_ADD_TEST(suite, test_get_ns_id_desc_list); 2778 CU_ADD_TEST(suite, test_identify_ns); 2779 CU_ADD_TEST(suite, test_reservation_write_exclusive); 2780 CU_ADD_TEST(suite, test_reservation_exclusive_access); 2781 CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs); 2782 CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs); 2783 CU_ADD_TEST(suite, test_reservation_notification_log_page); 2784 CU_ADD_TEST(suite, test_get_dif_ctx); 2785 CU_ADD_TEST(suite, test_set_get_features); 2786 CU_ADD_TEST(suite, test_identify_ctrlr); 2787 CU_ADD_TEST(suite, test_custom_admin_cmd); 2788 CU_ADD_TEST(suite, test_fused_compare_and_write); 2789 CU_ADD_TEST(suite, test_multi_async_event_reqs); 2790 CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp); 2791 CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp); 2792 CU_ADD_TEST(suite, test_multi_async_events); 2793 CU_ADD_TEST(suite, test_rae); 2794 CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct); 2795 CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy); 2796 CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start); 2797 CU_ADD_TEST(suite, test_zcopy_read); 2798 CU_ADD_TEST(suite, test_zcopy_write); 2799 CU_ADD_TEST(suite, test_nvmf_property_set); 2800 2801 allocate_threads(1); 2802 set_thread(0); 2803 2804 CU_basic_set_mode(CU_BRM_VERBOSE); 2805 CU_basic_run_tests(); 2806 num_failures = CU_get_number_of_failures(); 2807 CU_cleanup_registry(); 2808 2809 free_threads(); 2810 2811 return num_failures; 2812 } 2813