/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
};

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;
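
/*
 * Stubbed dependencies: DEFINE_STUB(fn, ret_type, args, val) from
 * spdk_internal/mock.h generates an implementation of fn that returns val
 * until a test overrides the return with MOCK_SET(fn, new_val);
 * DEFINE_STUB_V generates a void stub.
 */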
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}
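
/*
 * Hand-written fakes follow for the dependencies that fill output parameters
 * or need per-call logic, which the value-returning DEFINE_STUB macros
 * cannot express.
 */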
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}
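
/*
 * Zcopy fakes: cdw10/cdw11 carry the starting LBA and the low 16 bits of
 * cdw12 carry NLB, which is 0's-based (hence the "+ 1"). The fake tags
 * req->zcopy_bdev_io with one of the sentinel pointers defined at the top
 * of the file so tests can tell which zcopy path (read, write, or failure)
 * was taken.
 */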
int
nvmf_bdev_ctrlr_start_zcopy(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		return -ENXIO;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	spdk_nvmf_request_complete(req);
	return 0;
}

int
nvmf_bdev_ctrlr_end_zcopy(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
	return 0;
}

static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}
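
/*
 * test_connect() drives nvmf_ctrlr_cmd_connect() through the admin and I/O
 * CONNECT paths: valid connects, short data, bad recfmt/sqsize/cntlid,
 * discovery and disabled subsystems, exhausted qpair masks, and duplicate
 * queue IDs.
 */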
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_qpair qpair2;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;
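
	/*
	 * For the INVALID_PARAM failures below, the CONNECT response pinpoints
	 * the bad field: iattr = 0 means the offset is within the CONNECT SQE
	 * and iattr = 1 means it is within the CONNECT data, while ipo is the
	 * byte offset of the field itself (e.g. 44 = sqsize, 256 = subnqn).
	 */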
	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;
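
	/*
	 * SQSIZE is a 0's-based value: 31 encodes the 32 entries allowed by
	 * max_aq_depth, so 32 (33 entries) must be rejected for the admin queue.
	 */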
	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
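
	/* Discovery subsystems accept only the admin queue, so an I/O CONNECT must fail. */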
	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* I/O connect to discovery controller with keep-alive-timeout == 0.
	 * Then, a fixed timeout value is set to keep-alive-timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;
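
	/*
	 * qpair_mask was created with 3 bits (max_qpairs_per_ctrlr), so marking
	 * all three in use leaves no free queue ID for the next I/O CONNECT.
	 */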
	/* I/O connect with too many existing qpairs */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 0);
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	spdk_bit_array_set(ctrlr.qpair_mask, 2);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_bit_array_clear(ctrlr.qpair_mask, 2);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	memset(&qpair2, 0, sizeof(qpair2));
	qpair2.group = &group;
	qpair2.qid = 1;
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}
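
/*
 * test_get_ns_id_desc_list() checks Identify CNS 03h (Namespace
 * Identification Descriptor list). Each descriptor is NIDT (1 byte),
 * NIDL (1 byte), two reserved bytes, then NIDL bytes of ID, and the list
 * ends with a zero NIDT byte, which is what the buf[] offset asserts
 * below decode.
 */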
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}
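
/*
 * test_identify_ns(): ns_arr mixes active namespaces with a NULL (inactive)
 * slot, so NSID 2 must succeed but return an all-zero namespace data
 * structure rather than an error.
 */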
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
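
	/*
	 * THSEL has only two defined values (over and under temperature
	 * threshold); 0x3 is reserved and must be rejected.
	 */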
	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------         --------
 *      | Host A |           | Host B |       | Host C |
 *       --------             --------         --------
 *      /        \               |                |
 *  --------   --------       -------          -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|        |Ctrlr_C|
 *  --------   --------       -------          -------
 *    \           \              /                /
 *     \           \            /                /
 *      \           \          /                /
 *      ----------------------------------------
 *     |             NAMESPACE 1                |
 *      ----------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}
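
/*
 * Write Exclusive: only the reservation holder may issue write-type commands;
 * reads remain allowed for registrants and non-registrants alike.
 */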
static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}
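
/*
 * In the "registrants only" and "all registrants" variants, every registrant
 * has write access, so Host C's writes pass until it is unregistered.
 */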
static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

static void
init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	STAILQ_INIT(&ctrlr->async_events);
}

static void
cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_async_event_completion *event, *event_tmp;

	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
		free(event);
	}
}

static int
num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	int num = 0;
	struct spdk_nvmf_async_event_completion *event;

	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
		num++;
	}
	return num;
}

static void
test_reservation_notification_log_page(void)
{
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ns ns;
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_reservation_notification_log logs[3];
	struct iovec iov;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.thread = spdk_get_thread();
	TAILQ_INIT(&ctrlr.log_head);
	init_pending_async_events(&ctrlr);
	ns.nsid = 1;

	/* Test Case: Mask all the reservation notifications */
	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
		  SPDK_NVME_RESERVATION_RELEASED_MASK |
		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_REGISTRATION_PREEMPTED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_RELEASED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));

	/* Test Case: Unmask all the reservation notifications,
	 * 3 log pages are generated, and AER was triggered.
	 */
	ns.mask = 0;
	ctrlr.num_avail_log_pages = 0;
	req.cmd = &cmd;
	req.rsp = &rsp;
	ctrlr.aer_req[0] = &req;
	ctrlr.nr_aer_reqs = 1;
	req.qpair = &qpair;
	TAILQ_INIT(&qpair.outstanding);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);

	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_REGISTRATION_PREEMPTED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_RELEASED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	event.raw = rsp.nvme_cpl.cdw0;
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);

	/* Test Case: Get Log Page to clear the log pages */
	iov.iov_base = &logs[0];
	iov.iov_len = sizeof(logs);
	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);

	cleanup_pending_async_events(&ctrlr);
}
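
/*
 * test_get_dif_ctx() flips one precondition at a time (DIF disabled, qpair
 * not active, fabrics or flush opcode, admin queue, missing namespace);
 * only the final fully-valid WRITE on an I/O queue yields a DIF context.
 */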
static void
test_get_dif_ctx(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *_ns = NULL;
	struct spdk_bdev bdev = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;

	ns.bdev = &bdev;

	ctrlr.dif_insert_or_strip = false;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	ctrlr.dif_insert_or_strip = true;
	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.qid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvme_cmd.nsid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	subsystem.max_nsid = 1;
	subsystem.ns = &_ns;
	subsystem.ns[0] = &ns;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == true);
}
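
/*
 * IOCCSZ (I/O queue command capsule supported size) is expressed in 16-byte
 * units, so each case below expects the 64-byte SQE plus the transport's
 * in-capsule data size, both divided by 16.
 */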
static void
test_identify_ctrlr(void)
{
	struct spdk_nvmf_tgt tgt = {};
	struct spdk_nvmf_subsystem subsystem = {
		.subtype = SPDK_NVMF_SUBTYPE_NVME,
		.tgt = &tgt,
	};
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_transport transport = {
		.ops = &tops,
		.opts = {
			.in_capsule_data_size = 4096,
		},
	};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_ctrlr_data cdata = {};
	uint32_t expected_ioccsz;

	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);

	/* Check ioccsz, TCP transport */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, RDMA transport */
	tops.type = SPDK_NVME_TRANSPORT_RDMA;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, TCP transport with dif_insert_or_strip */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	ctrlr.dif_insert_or_strip = true;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
}
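
/*
 * spdk_nvmf_set_custom_admin_cmd_hdlr() lets an application take over a
 * vendor-specific admin opcode (0xc1 below). The handler simply completes
 * the request with SPDK_NVME_SC_SUCCESS so the test can confirm it was
 * invoked by nvmf_ctrlr_process_admin_cmd().
 */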
static int
custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
{
	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static void
test_custom_admin_cmd(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];
	int rc;

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = 0xc1;
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));

	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);

	/* Ensure that our handler is being called */
	rc = nvmf_ctrlr_process_admin_cmd(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
}
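
/*
 * Fused compare-and-write: per the NVMe spec the COMPARE must carry
 * FUSE_FIRST and the WRITE must carry FUSE_SECOND, back to back on the same
 * queue. The cases below cover the valid sequence, a FUSE_SECOND with no
 * preceding FUSE_FIRST, and both wrong-opcode combinations.
 */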
static void
test_fused_compare_and_write(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;

	/* SUCCESS/SUCCESS */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req == NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Wrong sequence */
	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* Write as FUSE_FIRST (wrong opcode) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* Compare as FUSE_SECOND (wrong opcode) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);
}

static void
test_multi_async_event_reqs(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[5] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[5] = {};
	union nvmf_c2h_msg rsp[5] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};

	int i;

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;

	for (i = 0; i < 5; i++) {
		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
		cmd[i].nvme_cmd.nsid = 1;
		cmd[i].nvme_cmd.cid = i;

		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];
		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
	}

	/* Target can store NVMF_MAX_ASYNC_EVENTS reqs */
	sgroups.mgmt_io_outstanding = NVMF_MAX_ASYNC_EVENTS;
	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
	}
	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);

	/* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);

	/* Test that aer_reqs stay contiguous when a req in the middle is aborted */
	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);

	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
	CU_ASSERT(ctrlr.aer_req[2] == NULL);
	CU_ASSERT(ctrlr.nr_aer_reqs == 2);

	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
}
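
/*
 * The ANA log page is a header followed by one descriptor per ANA group,
 * each descriptor carrying the group's state and its NSID list. Here every
 * namespace sits in its own group, and the page is read back both in 16-byte
 * chunks and through a split iovec to exercise the scattered-copy path.
 */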
static void
test_get_ana_log_page_one_ns_per_anagrp(void)
{
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
#define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
	uint32_t ana_group[3];
	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
	struct spdk_nvmf_ctrlr ctrlr = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
	uint64_t offset;
	uint32_t length;
	int i;
	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	struct iovec iov, iovs[2];
	struct spdk_nvme_ana_page *ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;

	subsystem.ns = ns_arr;
	subsystem.max_nsid = 3;
	for (i = 0; i < 3; i++) {
		subsystem.ana_group[i] = 1;
	}
	ctrlr.subsys = &subsystem;
	ctrlr.listener = &listener;

	for (i = 0; i < 3; i++) {
		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	}

	for (i = 0; i < 3; i++) {
		ns_arr[i]->nsid = i + 1;
		ns_arr[i]->anagrpid = i + 1;
	}

	/* create expected page */
	ana_hdr = (void *)&expected_page[0];
	ana_hdr->num_ana_group_desc = 3;
	ana_hdr->change_count = 0;

	/* descriptor may be unaligned. So create data and then copy it to the location. */
	ana_desc = (void *)_ana_desc;
	offset = sizeof(struct spdk_nvme_ana_page);

	for (i = 0; i < 3; i++) {
		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
		ana_desc->ana_group_id = ns_arr[i]->nsid;
		ana_desc->num_of_nsid = 1;
		ana_desc->change_count = 0;
		ana_desc->ana_state = ctrlr.listener->ana_state[i];
		ana_desc->nsid[0] = ns_arr[i]->nsid;
		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
		offset += UT_ANA_DESC_SIZE;
	}

	/* read entire actual log page */
	offset = 0;
	while (offset < UT_ANA_LOG_PAGE_SIZE) {
		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
		iov.iov_base = &actual_page[offset];
		iov.iov_len = length;
		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
		offset += length;
	}

	/* compare expected page and actual page */
	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
	offset = 0;
	iovs[0].iov_base = &actual_page[offset];
	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
	iovs[1].iov_base = &actual_page[offset];
	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);

	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

#undef UT_ANA_DESC_SIZE
#undef UT_ANA_LOG_PAGE_SIZE
}
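
/*
 * Same ANA log page check, but with five namespaces spread across two ANA
 * groups, so the descriptors carry variable-length NSID lists and the second
 * iovec boundary lands in the middle of a descriptor.
 */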
static void
test_get_ana_log_page_multi_ns_per_anagrp(void)
{
#define UT_ANA_LOG_PAGE_SIZE	(sizeof(struct spdk_nvme_ana_page) +	\
				 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 +	\
				 sizeof(uint32_t) * 5)
	struct spdk_nvmf_ns ns[5];
	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
	uint32_t ana_group[5] = {0};
	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
	enum spdk_nvme_ana_state ana_state[5];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	struct iovec iov, iovs[2];
	struct spdk_nvme_ana_page *ana_hdr;
	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	uint64_t offset;
	uint32_t length;
	int i;

	subsystem.max_nsid = 5;
	subsystem.ana_group[1] = 3;
	subsystem.ana_group[2] = 2;
	for (i = 0; i < 5; i++) {
		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	}

	for (i = 0; i < 5; i++) {
		ns_arr[i]->nsid = i + 1;
	}
	ns_arr[0]->anagrpid = 2;
	ns_arr[1]->anagrpid = 3;
	ns_arr[2]->anagrpid = 2;
	ns_arr[3]->anagrpid = 3;
	ns_arr[4]->anagrpid = 2;

	/* create expected page */
	ana_hdr = (void *)&expected_page[0];
	ana_hdr->num_ana_group_desc = 2;
	ana_hdr->change_count = 0;

	/* descriptor may be unaligned. So create data and then copy it to the location. */
	ana_desc = (void *)_ana_desc;
	offset = sizeof(struct spdk_nvme_ana_page);

	memset(_ana_desc, 0, sizeof(_ana_desc));
	ana_desc->ana_group_id = 2;
	ana_desc->num_of_nsid = 3;
	ana_desc->change_count = 0;
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->nsid[0] = 1;
	ana_desc->nsid[1] = 3;
	ana_desc->nsid[2] = 5;
	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
	       sizeof(uint32_t) * 3);
	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;

	memset(_ana_desc, 0, sizeof(_ana_desc));
	ana_desc->ana_group_id = 3;
	ana_desc->num_of_nsid = 2;
	ana_desc->change_count = 0;
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->nsid[0] = 2;
	ana_desc->nsid[1] = 4;
	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
	       sizeof(uint32_t) * 2);

	/* read entire actual log page, and compare expected page and actual page. */
	offset = 0;
	while (offset < UT_ANA_LOG_PAGE_SIZE) {
		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
		iov.iov_base = &actual_page[offset];
		iov.iov_len = length;
		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
		offset += length;
	}

	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
	offset = 0;
	iovs[0].iov_base = &actual_page[offset];
	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
	iovs[1].iov_base = &actual_page[offset];
	iovs[1].iov_len = sizeof(uint32_t) * 5;
	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);

	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

#undef UT_ANA_LOG_PAGE_SIZE
}
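
/*
 * Three different event types are raised while no AER request is outstanding,
 * so all three are queued; each subsequent AER then completes immediately
 * with the next queued event, and only the fourth AER remains pending.
 */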
static void
test_multi_async_events(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[4] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[4] = {};
	union nvmf_c2h_msg rsp[4] = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	int i;

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
	init_pending_async_events(&ctrlr);

	/* The target queues pending events when there is no outstanding AER request */
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);

	for (i = 0; i < 4; i++) {
		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
		cmd[i].nvme_cmd.nsid = 1;
		cmd[i].nvme_cmd.cid = i;

		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];

		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);

		sgroups.mgmt_io_outstanding = 1;
		if (i < 3) {
			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
		} else {
			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
		}
	}

	event.raw = rsp[0].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
	event.raw = rsp[1].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
	event.raw = rsp[2].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);

	cleanup_pending_async_events(&ctrlr);
}
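
/*
 * RAE (Retain Asynchronous Event) handling: while an event is unacknowledged,
 * duplicate notices are suppressed. Reading the changed-NS-list log with
 * RAE=1 must keep that state, while reading it with RAE=0 clears it and
 * allows a new event to be queued.
 */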
static void
test_rae(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[3] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[3] = {};
	union nvmf_c2h_msg rsp[3] = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	int i;
	char data[4096];

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
	init_pending_async_events(&ctrlr);

	/* The target queues pending events when there is no outstanding AER request */
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	/* only one event is queued until RAE is cleared */
	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);

	req[0].qpair = &qpair;
	req[0].cmd = &cmd[0];
	req[0].rsp = &rsp[0];
	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
	cmd[0].nvme_cmd.nsid = 1;
	cmd[0].nvme_cmd.cid = 0;

	for (i = 1; i < 3; i++) {
		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];
		req[i].data = &data;
		req[i].length = sizeof(data);

		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
			SPDK_NVME_LOG_CHANGED_NS_LIST;
		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
			spdk_nvme_bytes_to_numd(req[i].length);
		cmd[i].nvme_cmd.cid = i;
	}
	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;

	/* consume the pending event */
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	event.raw = rsp[0].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);

	/* get log with RAE set */
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* no new event is generated until RAE is cleared */
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);

	/* get log with RAE cleared */
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);

	cleanup_pending_async_events(&ctrlr);
}
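
/*
 * End-to-end controller lifetime: nvmf_ctrlr_create() is driven with a
 * CONNECT command, and the resulting controller's features and virtual
 * controller properties (CAP, VS, CC, CSTS) are checked against the
 * transport options before nvmf_ctrlr_destruct() tears it down.
 */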
static void
test_nvmf_ctrlr_create_destruct(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr *ctrlr = NULL;
	struct spdk_nvmf_tgt tgt = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";

	group.thread = spdk_get_thread();
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.opts.dif_insert_or_strip = true;
	transport.tgt = &tgt;
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	group.sgroups = sgroups;

	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	sgroups[subsystem.id].mgmt_io_outstanding++;

	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.data);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
	CU_ASSERT(req.qpair->ctrlr == ctrlr);
	CU_ASSERT(ctrlr->subsys == &subsystem);
	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
	CU_ASSERT(ctrlr->disconnect_in_progress == false);
	CU_ASSERT(ctrlr->qpair_mask != NULL);
	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
	/* memcmp, not strncmp: the hostid is a binary UUID and its first byte is 0x00 */
	CU_ASSERT(memcmp(&ctrlr->hostid, hostid, sizeof(hostid)) == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.to == 1);
	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
	CU_ASSERT(ctrlr->dif_insert_or_strip == true);

	nvmf_ctrlr_destruct(ctrlr);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
}
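
/*
 * nvmf_ctrlr_use_zcopy() may only be used for READ/WRITE on an I/O queue
 * against a namespace whose bdev supports zcopy; everything else must fall
 * back to the regular request path.
 */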
static void
test_nvmf_ctrlr_use_zcopy(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	struct spdk_bdev bdev = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};
	int opc;

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	req.qpair = &qpair;
	req.cmd = &cmd;

	/* Admin queue */
	qpair.qid = 0;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	qpair.qid = 1;

	/* Invalid Opcodes */
	for (opc = 0; opc <= 255; opc++) {
		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
		}
	}
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* Fused WRITE */
	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;

	/* Invalid NSID: no namespace, and thus no bdev */
	cmd.nvme_cmd.nsid = 4;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	cmd.nvme_cmd.nsid = 1;

	/* ZCOPY not supported by the namespace */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);

	/* Success */
	ns.zcopy = true;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
}
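
/*
 * spdk_nvmf_request_zcopy_start() failure paths: each precondition (the
 * controller, poll-group sgroup, NSID, channel, namespace and qpair state,
 * and an LBA range accepted by the bdev layer) is broken one at a time, and
 * each failure must leave the request back in NVMF_ZCOPY_PHASE_NONE.
 */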
static void
test_spdk_nvmf_request_zcopy_start(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512 };

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	cmd.opc = SPDK_NVME_OPC_READ;

	/* Fail because no controller */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	qpair.ctrlr = NULL;
	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
	qpair.ctrlr = &ctrlr;

	/* Fail because no sgroup */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	group.sgroups = NULL;
	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
	group.sgroups = &sgroups;

	/* Fail because bad NSID */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	cmd.nsid = 0;
	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
	cmd.nsid = 1;

	/* Fail because bad Channel */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	ns_info.channel = NULL;
	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
	ns_info.channel = &io_ch;

	/* Fail because NSID is not active */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;

	/* Fail because QPair is not active */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Fail because nvmf_bdev_ctrlr_start_zcopy fails */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
	cmd.cdw12 = 100;		/* NLB: CDW12 bits 15:00, 0's based */
	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) < 0);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_NONE);
	cmd.cdw10 = 0;
	cmd.cdw12 = 0;

	/* Success */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
}
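
/*
 * Full zcopy request lifecycle for reads and writes: zcopy start pins the
 * request as outstanding and hands back the stubbed zcopy bdev_io, exec
 * completes without re-buffering the data, and zcopy end releases the
 * bdev_io and retires the request.
 */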
static void
test_zcopy_read(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512 };

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	cmd.opc = SPDK_NVME_OPC_READ;

	/* Prepare for zcopy */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);

	/* Perform the zcopy start */
	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
	CU_ASSERT(qpair.outstanding.tqh_first == &req);
	CU_ASSERT(ns_info.io_outstanding == 1);

	/* Execute the request */
	spdk_nvmf_request_exec(&req);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
	CU_ASSERT(qpair.outstanding.tqh_first == &req);
	CU_ASSERT(ns_info.io_outstanding == 1);

	/* Perform the zcopy end */
	spdk_nvmf_request_zcopy_end(&req, false);
	CU_ASSERT(req.zcopy_bdev_io == NULL);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);
}

static void
test_zcopy_write(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512 };

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	/* Prepare for zcopy */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);

	/* Perform the zcopy start */
	CU_ASSERT(spdk_nvmf_request_zcopy_start(&req) == 0);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
	CU_ASSERT(qpair.outstanding.tqh_first == &req);
	CU_ASSERT(ns_info.io_outstanding == 1);

	/* Execute the request */
	spdk_nvmf_request_exec(&req);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
	CU_ASSERT(qpair.outstanding.tqh_first == &req);
	CU_ASSERT(ns_info.io_outstanding == 1);

	/* Perform the zcopy end */
	spdk_nvmf_request_zcopy_end(&req, true);
	CU_ASSERT(req.zcopy_bdev_io == NULL);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);
}
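
/*
 * Property access: CC is a 4-byte property and ASQ an 8-byte one. A size-4
 * get on ASQ returns only the low dword, which is why 0xAADDADBEEF reads
 * back as 0xDDADBEEF below.
 */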
static void
test_nvmf_property_set(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	req.qpair = &qpair;
	qpair.ctrlr = &ctrlr;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Invalid parameters */
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);

	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);

	/* Set cc with same property size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate cc data */
	ctrlr.vcprop.cc.raw = 0xDEADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);

	/* Set asq with different property size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate asq data */
	ctrlr.vcprop.asq = 0xAADDADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);
	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);
	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
	CU_ADD_TEST(suite, test_multi_async_events);
	CU_ADD_TEST(suite, test_rae);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
	CU_ADD_TEST(suite, test_zcopy_read);
	CU_ADD_TEST(suite, test_zcopy_write);
	CU_ADD_TEST(suite, test_nvmf_property_set);

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}