/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

struct spdk_bdev {
    int ut_mock;
    uint64_t blockcnt;
    uint32_t blocklen;
};

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *) 0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
            struct spdk_nvmf_subsystem *,
            (struct spdk_nvmf_tgt *tgt, const char *subnqn),
            NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
            struct spdk_nvmf_poll_group *,
            (struct spdk_nvmf_tgt *tgt),
            NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
            const char *,
            (const struct spdk_nvmf_subsystem *subsystem),
            subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
            const char *,
            (const struct spdk_nvmf_subsystem *subsystem),
            subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
            bool,
            (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
            true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
            int,
            (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
            0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
            struct spdk_nvmf_ctrlr *,
            (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
            NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
            bool,
            (struct spdk_nvmf_ctrlr *ctrlr),
            false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
            bool,
            (struct spdk_nvmf_ctrlr *ctrlr),
            false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
            bool,
            (struct spdk_nvmf_ctrlr *ctrlr),
            false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
              (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
               uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
            int,
            (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
            0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
            bool,
            (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
            true);

DEFINE_STUB(nvmf_subsystem_find_listener,
            struct spdk_nvmf_subsystem_listener *,
            (struct spdk_nvmf_subsystem *subsystem,
             const struct spdk_nvme_transport_id *trid),
            (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

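/* Fused compare-and-write is dispatched as a pair, so this stub receives both
 * the compare and the write halves of the fused command.
 */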
DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
            0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(nvmf_transport_req_complete,
            int,
            (struct spdk_nvmf_request *req),
            0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
            (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
             struct spdk_dif_ctx *dif_ctx),
            true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
              (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
              struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
            0);

DEFINE_STUB(nvmf_transport_req_free,
            int,
            (struct spdk_nvmf_request *req),
            0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
            int,
            (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
             struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
            0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
                                   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
    return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
                            bool dif_insert_or_strip)
{
    uint64_t num_blocks;

    SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
    num_blocks = ns->bdev->blockcnt;
    nsdata->nsze = num_blocks;
    nsdata->ncap = num_blocks;
    nsdata->nuse = num_blocks;
    nsdata->nlbaf = 0;
    nsdata->flbas.format = 0;
    nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
    SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
    return subsystem->ns[0];
}

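/* Walk the subsystem's namespace array; entries may be NULL for inactive
 * NSIDs, so skip holes until the next allocated namespace.
 */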
struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
                                struct spdk_nvmf_ns *prev_ns)
{
    uint32_t nsid;

    SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
    nsid = prev_ns->nsid;

    if (nsid >= subsystem->max_nsid) {
        return NULL;
    }
    for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
        if (subsystem->ns[nsid - 1]) {
            return subsystem->ns[nsid - 1];
        }
    }
    return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
    return true;
}

int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
                            struct spdk_bdev_desc *desc,
                            struct spdk_io_channel *ch,
                            struct spdk_nvmf_request *req)
{
    struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
    uint64_t start_lba;
    uint64_t num_blocks;

    start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
    num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

    if ((start_lba + num_blocks) > bdev->blockcnt) {
        rsp->status.sct = SPDK_NVME_SCT_GENERIC;
        rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
    }

    if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
        req->zcopy_bdev_io = zcopy_start_bdev_io_write;
    } else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
        req->zcopy_bdev_io = zcopy_start_bdev_io_read;
    } else {
        req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
    }

    return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
    req->zcopy_bdev_io = NULL;
    spdk_nvmf_request_complete(req);
}

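/* Verify Get Log Page handling: a valid request succeeds, while an unknown
 * log ID, a non-dword-aligned offset, or a missing data buffer completes
 * with Invalid Field.
 */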
static void
test_get_log_page(void)
{
    struct spdk_nvmf_subsystem subsystem = {};
    struct spdk_nvmf_request req = {};
    struct spdk_nvmf_qpair qpair = {};
    struct spdk_nvmf_ctrlr ctrlr = {};
    union nvmf_h2c_msg cmd = {};
    union nvmf_c2h_msg rsp = {};
    char data[4096];

    subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

    ctrlr.subsys = &subsystem;

    qpair.ctrlr = &ctrlr;

    req.qpair = &qpair;
    req.cmd = &cmd;
    req.rsp = &rsp;
    req.data = &data;
    req.length = sizeof(data);

    /* Get Log Page - all valid */
    memset(&cmd, 0, sizeof(cmd));
    memset(&rsp, 0, sizeof(rsp));
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
    cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
    cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
    CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

    /* Get Log Page with invalid log ID */
    memset(&cmd, 0, sizeof(cmd));
    memset(&rsp, 0, sizeof(rsp));
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
    cmd.nvme_cmd.cdw10 = 0;
    CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

    /* Get Log Page with invalid offset (not dword aligned) */
    memset(&cmd, 0, sizeof(cmd));
    memset(&rsp, 0, sizeof(rsp));
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
    cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
    cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
    cmd.nvme_cmd.cdw12 = 2;
    CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

    /* Get Log Page without data buffer */
    memset(&cmd, 0, sizeof(cmd));
    memset(&rsp, 0, sizeof(rsp));
    req.data = NULL;
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
    cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
    cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
    CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
    req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
    struct spdk_nvmf_request req = {};
    int ret;
    struct spdk_nvmf_qpair req_qpair = {};
    union nvmf_h2c_msg req_cmd = {};
    union nvmf_c2h_msg req_rsp = {};

    req.qpair = &req_qpair;
    req.cmd = &req_cmd;
    req.rsp = &req_rsp;
    req.qpair->ctrlr = NULL;

    /* No ctrlr and invalid command check */
    req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
    ret = nvmf_ctrlr_process_fabrics_cmd(&req);
    CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
    CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
    return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

static void
test_connect(void)
{
    struct spdk_nvmf_fabric_connect_data connect_data;
    struct spdk_nvmf_poll_group group;
    struct spdk_nvmf_subsystem_poll_group *sgroups;
    struct spdk_nvmf_transport transport;
    struct spdk_nvmf_transport_ops tops = {};
    struct spdk_nvmf_subsystem subsystem;
    struct spdk_nvmf_request req;
    struct spdk_nvmf_qpair admin_qpair;
    struct spdk_nvmf_qpair qpair;
    struct spdk_nvmf_qpair qpair2;
    struct spdk_nvmf_ctrlr ctrlr;
    struct spdk_nvmf_tgt tgt;
    union nvmf_h2c_msg cmd;
    union nvmf_c2h_msg rsp;
    const uint8_t hostid[16] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
    };
    const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
    const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
    int rc;

    memset(&group, 0, sizeof(group));
    group.thread = spdk_get_thread();

    memset(&ctrlr, 0, sizeof(ctrlr));
    ctrlr.subsys = &subsystem;
    ctrlr.qpair_mask = spdk_bit_array_create(3);
    SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
    ctrlr.vcprop.cc.bits.en = 1;
    ctrlr.vcprop.cc.bits.iosqes = 6;
    ctrlr.vcprop.cc.bits.iocqes = 4;

    memset(&admin_qpair, 0, sizeof(admin_qpair));
    admin_qpair.group = &group;
    admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

    memset(&tgt, 0, sizeof(tgt));
    memset(&transport, 0, sizeof(transport));
    transport.ops = &tops;
    transport.opts.max_aq_depth = 32;
    transport.opts.max_queue_depth = 64;
    transport.opts.max_qpairs_per_ctrlr = 3;
    transport.tgt = &tgt;

    memset(&qpair, 0, sizeof(qpair));
    qpair.transport = &transport;
    qpair.group = &group;
    qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
    TAILQ_INIT(&qpair.outstanding);

    memset(&connect_data, 0, sizeof(connect_data));
    memcpy(connect_data.hostid, hostid, sizeof(hostid));
    connect_data.cntlid = 0xFFFF;
    snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
    snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

    memset(&subsystem, 0, sizeof(subsystem));
    subsystem.thread = spdk_get_thread();
    subsystem.id = 1;
    TAILQ_INIT(&subsystem.ctrlrs);
    subsystem.tgt = &tgt;
    subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
    subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

    sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
    group.sgroups = sgroups;

    memset(&cmd, 0, sizeof(cmd));
    cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
    cmd.connect_cmd.cid = 1;
    cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
    cmd.connect_cmd.recfmt = 0;
    cmd.connect_cmd.qid = 0;
    cmd.connect_cmd.sqsize = 31;
    cmd.connect_cmd.cattr = 0;
    cmd.connect_cmd.kato = 120000;

    memset(&req, 0, sizeof(req));
    req.qpair = &qpair;
    req.length = sizeof(connect_data);
    req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
    req.data = &connect_data;
    req.cmd = &cmd;
    req.rsp = &rsp;

    MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
    MOCK_SET(spdk_nvmf_poll_group_create, &group);

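    /* Each connect case below queues the request on the qpair, drives the
     * reactor with poll_threads(), and then checks both the return code and
     * the completion status.
     */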
    /* Valid admin connect command */
    memset(&rsp, 0, sizeof(rsp));
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
    CU_ASSERT(qpair.ctrlr != NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
    spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
    free(qpair.ctrlr);
    qpair.ctrlr = NULL;

    /* Valid admin connect command with kato = 0 */
    cmd.connect_cmd.kato = 0;
    memset(&rsp, 0, sizeof(rsp));
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
    CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
    free(qpair.ctrlr);
    qpair.ctrlr = NULL;
    cmd.connect_cmd.kato = 120000;

    /* Invalid data length */
    memset(&rsp, 0, sizeof(rsp));
    req.length = sizeof(connect_data) - 1;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
    CU_ASSERT(qpair.ctrlr == NULL);
    req.length = sizeof(connect_data);

    /* Invalid recfmt */
    memset(&rsp, 0, sizeof(rsp));
    cmd.connect_cmd.recfmt = 1234;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
    CU_ASSERT(qpair.ctrlr == NULL);
    cmd.connect_cmd.recfmt = 0;

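    /* For connect failures, iattr/ipo in the response locate the invalid
     * parameter: iattr 1 means ipo is a byte offset into the connect data,
     * iattr 0 an offset into the command itself.
     */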
    /* Subsystem not found */
    memset(&rsp, 0, sizeof(rsp));
    MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
    CU_ASSERT(qpair.ctrlr == NULL);
    MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

    /* Unterminated hostnqn */
    memset(&rsp, 0, sizeof(rsp));
    memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
    CU_ASSERT(qpair.ctrlr == NULL);
    snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

    /* Host not allowed */
    memset(&rsp, 0, sizeof(rsp));
    MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
    CU_ASSERT(qpair.ctrlr == NULL);
    MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

    /* Invalid sqsize == 0 */
    memset(&rsp, 0, sizeof(rsp));
    cmd.connect_cmd.sqsize = 0;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
    CU_ASSERT(qpair.ctrlr == NULL);
    cmd.connect_cmd.sqsize = 31;

    /* Invalid admin sqsize > max_aq_depth */
    memset(&rsp, 0, sizeof(rsp));
    cmd.connect_cmd.sqsize = 32;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
    CU_ASSERT(qpair.ctrlr == NULL);
    cmd.connect_cmd.sqsize = 31;

    /* Invalid I/O sqsize > max_queue_depth */
    memset(&rsp, 0, sizeof(rsp));
    cmd.connect_cmd.qid = 1;
    cmd.connect_cmd.sqsize = 64;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
    CU_ASSERT(qpair.ctrlr == NULL);
    cmd.connect_cmd.qid = 0;
    cmd.connect_cmd.sqsize = 31;

    /* Invalid cntlid for admin queue */
    memset(&rsp, 0, sizeof(rsp));
    connect_data.cntlid = 0x1234;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
    CU_ASSERT(qpair.ctrlr == NULL);
    connect_data.cntlid = 0xFFFF;

    ctrlr.admin_qpair = &admin_qpair;
    ctrlr.subsys = &subsystem;

    /* Valid I/O queue connect command */
    memset(&rsp, 0, sizeof(rsp));
    MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
    cmd.connect_cmd.qid = 1;
    cmd.connect_cmd.sqsize = 63;
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
    CU_ASSERT(qpair.ctrlr == &ctrlr);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    qpair.ctrlr = NULL;
    cmd.connect_cmd.sqsize = 31;

    /* Non-existent controller */
    memset(&rsp, 0, sizeof(rsp));
    MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
    CU_ASSERT(qpair.ctrlr == NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

    /* I/O connect to discovery controller */
    memset(&rsp, 0, sizeof(rsp));
    subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
    subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
    CU_ASSERT(qpair.ctrlr == NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

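    /* Discovery subsystems only accept the admin queue; admin connects
     * succeed and always arm the keep-alive timer, even when the host asks
     * for kato == 0.
     */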
    /* I/O connect to discovery controller with keep-alive-timeout != 0 */
    cmd.connect_cmd.qid = 0;
    cmd.connect_cmd.kato = 120000;
    memset(&rsp, 0, sizeof(rsp));
    subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
    subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
    CU_ASSERT(qpair.ctrlr != NULL);
    CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
    spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
    free(qpair.ctrlr);
    qpair.ctrlr = NULL;

    /* I/O connect to discovery controller with keep-alive-timeout == 0.
     * Then, a fixed timeout value is set to keep-alive-timeout.
     */
    cmd.connect_cmd.kato = 0;
    memset(&rsp, 0, sizeof(rsp));
    subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
    subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
    CU_ASSERT(qpair.ctrlr != NULL);
    CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
    spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
    free(qpair.ctrlr);
    qpair.ctrlr = NULL;
    cmd.connect_cmd.qid = 1;
    cmd.connect_cmd.kato = 120000;
    subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

    /* I/O connect to disabled controller */
    memset(&rsp, 0, sizeof(rsp));
    ctrlr.vcprop.cc.bits.en = 0;
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
    CU_ASSERT(qpair.ctrlr == NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    ctrlr.vcprop.cc.bits.en = 1;

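    /* CC.IOSQES/IOCQES are log2 of the queue entry sizes; NVMe over Fabrics
     * requires 6 (64-byte SQE) and 4 (16-byte CQE), so 3 must be rejected.
     */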
    /* I/O connect with invalid IOSQES */
    memset(&rsp, 0, sizeof(rsp));
    ctrlr.vcprop.cc.bits.iosqes = 3;
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
    CU_ASSERT(qpair.ctrlr == NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    ctrlr.vcprop.cc.bits.iosqes = 6;

    /* I/O connect with invalid IOCQES */
    memset(&rsp, 0, sizeof(rsp));
    ctrlr.vcprop.cc.bits.iocqes = 3;
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
    CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
    CU_ASSERT(qpair.ctrlr == NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    ctrlr.vcprop.cc.bits.iocqes = 4;

    /* I/O connect with too many existing qpairs */
    memset(&rsp, 0, sizeof(rsp));
    spdk_bit_array_set(ctrlr.qpair_mask, 0);
    spdk_bit_array_set(ctrlr.qpair_mask, 1);
    spdk_bit_array_set(ctrlr.qpair_mask, 2);
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
    CU_ASSERT(qpair.ctrlr == NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    spdk_bit_array_clear(ctrlr.qpair_mask, 0);
    spdk_bit_array_clear(ctrlr.qpair_mask, 1);
    spdk_bit_array_clear(ctrlr.qpair_mask, 2);

    /* I/O connect with duplicate queue ID */
    memset(&rsp, 0, sizeof(rsp));
    memset(&qpair2, 0, sizeof(qpair2));
    qpair2.group = &group;
    qpair2.qid = 1;
    spdk_bit_array_set(ctrlr.qpair_mask, 1);
    cmd.connect_cmd.qid = 1;
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
    CU_ASSERT(qpair.ctrlr == NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

    /* I/O connect when admin qpair is being destroyed */
    admin_qpair.group = NULL;
    admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
    memset(&rsp, 0, sizeof(rsp));
    sgroups[subsystem.id].mgmt_io_outstanding++;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
    rc = nvmf_ctrlr_cmd_connect(&req);
    poll_threads();
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
    CU_ASSERT(qpair.ctrlr == NULL);
    CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
    admin_qpair.group = &group;
    admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

    /* Clean up globals */
    MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
    MOCK_CLEAR(spdk_nvmf_poll_group_create);

    spdk_bit_array_free(&ctrlr.qpair_mask);
    free(sgroups);
}

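/* The namespace ID descriptor list is returned as packed (NIDT, NIDL,
 * reserved[2], NID) tuples; the asserts below index the raw bytes of that
 * layout.
 */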
static void
test_get_ns_id_desc_list(void)
{
    struct spdk_nvmf_subsystem subsystem;
    struct spdk_nvmf_qpair qpair;
    struct spdk_nvmf_ctrlr ctrlr;
    struct spdk_nvmf_request req;
    struct spdk_nvmf_ns *ns_ptrs[1];
    struct spdk_nvmf_ns ns;
    union nvmf_h2c_msg cmd;
    union nvmf_c2h_msg rsp;
    struct spdk_bdev bdev;
    uint8_t buf[4096];

    memset(&subsystem, 0, sizeof(subsystem));
    ns_ptrs[0] = &ns;
    subsystem.ns = ns_ptrs;
    subsystem.max_nsid = 1;
    subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

    memset(&ns, 0, sizeof(ns));
    ns.opts.nsid = 1;
    ns.bdev = &bdev;

    memset(&qpair, 0, sizeof(qpair));
    qpair.ctrlr = &ctrlr;

    memset(&ctrlr, 0, sizeof(ctrlr));
    ctrlr.subsys = &subsystem;
    ctrlr.vcprop.cc.bits.en = 1;
    ctrlr.thread = spdk_get_thread();

    memset(&req, 0, sizeof(req));
    req.qpair = &qpair;
    req.cmd = &cmd;
    req.rsp = &rsp;
    req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
    req.data = buf;
    req.length = sizeof(buf);
    req.iovcnt = 1;
    req.iov[0].iov_base = req.data;
    req.iov[0].iov_len = req.length;

    memset(&cmd, 0, sizeof(cmd));
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
    cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

    /* Invalid NSID */
    cmd.nvme_cmd.nsid = 0;
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

    /* Valid NSID, but ns has no IDs defined */
    cmd.nvme_cmd.nsid = 1;
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
    CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

    /* Valid NSID, only EUI64 defined */
    ns.opts.eui64[0] = 0x11;
    ns.opts.eui64[7] = 0xFF;
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
    CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
    CU_ASSERT(buf[1] == 8);
    CU_ASSERT(buf[4] == 0x11);
    CU_ASSERT(buf[11] == 0xFF);
    CU_ASSERT(buf[13] == 0);

    /* Valid NSID, only NGUID defined */
    memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
    ns.opts.nguid[0] = 0x22;
    ns.opts.nguid[15] = 0xEE;
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
    CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
    CU_ASSERT(buf[1] == 16);
    CU_ASSERT(buf[4] == 0x22);
    CU_ASSERT(buf[19] == 0xEE);
    CU_ASSERT(buf[21] == 0);

    /* Valid NSID, both EUI64 and NGUID defined */
    ns.opts.eui64[0] = 0x11;
    ns.opts.eui64[7] = 0xFF;
    ns.opts.nguid[0] = 0x22;
    ns.opts.nguid[15] = 0xEE;
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
    CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
    CU_ASSERT(buf[1] == 8);
    CU_ASSERT(buf[4] == 0x11);
    CU_ASSERT(buf[11] == 0xFF);
    CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
    CU_ASSERT(buf[13] == 16);
    CU_ASSERT(buf[16] == 0x22);
    CU_ASSERT(buf[31] == 0xEE);
    CU_ASSERT(buf[33] == 0);

    /* Valid NSID, EUI64, NGUID, and UUID defined */
    ns.opts.eui64[0] = 0x11;
    ns.opts.eui64[7] = 0xFF;
    ns.opts.nguid[0] = 0x22;
    ns.opts.nguid[15] = 0xEE;
    ns.opts.uuid.u.raw[0] = 0x33;
    ns.opts.uuid.u.raw[15] = 0xDD;
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
    CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
    CU_ASSERT(buf[1] == 8);
    CU_ASSERT(buf[4] == 0x11);
    CU_ASSERT(buf[11] == 0xFF);
    CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
    CU_ASSERT(buf[13] == 16);
    CU_ASSERT(buf[16] == 0x22);
    CU_ASSERT(buf[31] == 0xEE);
    CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
    CU_ASSERT(buf[33] == 16);
    CU_ASSERT(buf[36] == 0x33);
    CU_ASSERT(buf[51] == 0xDD);
    CU_ASSERT(buf[53] == 0);
}

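/* Identify Namespace: an active namespace reports its bdev-backed data, an
 * allocated-but-inactive NSID returns all zeroes, and an out-of-range NSID
 * fails with Invalid Namespace or Format.
 */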
static void
test_identify_ns(void)
{
    struct spdk_nvmf_subsystem subsystem = {};
    struct spdk_nvmf_transport transport = {};
    struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
    struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
    struct spdk_nvme_cmd cmd = {};
    struct spdk_nvme_cpl rsp = {};
    struct spdk_nvme_ns_data nsdata = {};
    struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
    struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
    struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

    subsystem.ns = ns_arr;
    subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

    /* Invalid NSID 0 */
    cmd.nsid = 0;
    memset(&nsdata, 0, sizeof(nsdata));
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                                          &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
    CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

    /* Valid NSID 1 */
    cmd.nsid = 1;
    memset(&nsdata, 0, sizeof(nsdata));
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                                          &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
    CU_ASSERT(nsdata.nsze == 1234);

    /* Valid but inactive NSID 2 */
    cmd.nsid = 2;
    memset(&nsdata, 0, sizeof(nsdata));
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                                          &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
    CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

    /* Valid NSID 3 */
    cmd.nsid = 3;
    memset(&nsdata, 0, sizeof(nsdata));
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                                          &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
    CU_ASSERT(nsdata.nsze == 5678);

    /* Invalid NSID 4 */
    cmd.nsid = 4;
    memset(&nsdata, 0, sizeof(nsdata));
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                                          &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
    CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

    /* Invalid NSID 0xFFFFFFFF (NS management not supported) */
    cmd.nsid = 0xFFFFFFFF;
    memset(&nsdata, 0, sizeof(nsdata));
    memset(&rsp, 0, sizeof(rsp));
    CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
                                          &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
    CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

static void
test_set_get_features(void)
{
    struct spdk_nvmf_subsystem subsystem = {};
    struct spdk_nvmf_qpair admin_qpair = {};
    enum spdk_nvme_ana_state ana_state[3];
    struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
    struct spdk_nvmf_ctrlr ctrlr = {
        .subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
    };
    union nvmf_h2c_msg cmd = {};
    union nvmf_c2h_msg rsp = {};
    struct spdk_nvmf_ns ns[3];
    struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
    struct spdk_nvmf_request req;
    int rc;

    ns[0].anagrpid = 1;
    ns[2].anagrpid = 3;
    subsystem.ns = ns_arr;
    subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
    listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
    listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
    admin_qpair.ctrlr = &ctrlr;
    req.qpair = &admin_qpair;
    cmd.nvme_cmd.nsid = 1;
    req.cmd = &cmd;
    req.rsp = &rsp;

    /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
    cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
    ns[0].ptpl_file = "testcfg";
    rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
    CU_ASSERT(ns[0].ptpl_activated == true);

    /* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
    cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
    rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
    CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

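    /* TMPSEL (cdw11 bits 19:16) selects which temperature sensor the
     * threshold applies to; the value 9 used below falls in the spec's
     * reserved range, so the command must fail.
     */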
    /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
    cmd.nvme_cmd.cdw11 = 0x42;
    cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

    rc = nvmf_ctrlr_get_features(&req);
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

    /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
    cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
    cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

    rc = nvmf_ctrlr_get_features(&req);
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

    /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
    cmd.nvme_cmd.cdw11 = 0x42;
    cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

    rc = nvmf_ctrlr_set_features(&req);
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

    /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
    cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
    cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

    rc = nvmf_ctrlr_set_features(&req);
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

    /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
    cmd.nvme_cmd.cdw11 = 0x42;
    cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
    cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

    rc = nvmf_ctrlr_set_features(&req);
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

    /* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
    cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

    rc = nvmf_ctrlr_get_features(&req);
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

    /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
    cmd.nvme_cmd.cdw11 = 0x42;
    cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
    cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

    rc = nvmf_ctrlr_set_features(&req);
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
    CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
    CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

    /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
    cmd.nvme_cmd.cdw11 = 0x42;
    cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
    cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

    rc = nvmf_ctrlr_set_features(&req);
    CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *
 *   --------      --------     --------
 *  | Host A |    | Host B |   | Host C |
 *   --------      --------     --------
 *   /      \         |            |
 *  --------  --------  -------  -------
 * |Ctrlr1_A||Ctrlr2_A||Ctrlr_B||Ctrlr_C|
 *  --------  --------  -------  -------
 *     \         \         /        /
 *      ---------------------------
 *     |        NAMESPACE 1        |
 *      ---------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
    /* Host A has two controllers */
    spdk_uuid_generate(&g_ctrlr1_A.hostid);
    spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

    /* Host B has 1 controller */
    spdk_uuid_generate(&g_ctrlr_B.hostid);

    /* Host C has 1 controller */
    spdk_uuid_generate(&g_ctrlr_C.hostid);

    memset(&g_ns_info, 0, sizeof(g_ns_info));
    g_ns_info.rtype = rtype;
    g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
    g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
    g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

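/* Write Exclusive: all registrants (and even non-registrants) may read, but
 * only the reservation holder may write.
 */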
static void
test_reservation_write_exclusive(void)
{
    struct spdk_nvmf_request req = {};
    union nvmf_h2c_msg cmd = {};
    union nvmf_c2h_msg rsp = {};
    int rc;

    req.cmd = &cmd;
    req.rsp = &rsp;

    /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
    ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
    g_ns_info.holder_id = g_ctrlr1_A.hostid;

    /* Test Case: Issue a Read command from Host A and Host B */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);

    /* Test Case: Issue a DSM Write command from Host A and Host B */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
    SPDK_CU_ASSERT_FATAL(rc < 0);
    SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

    /* Test Case: Issue a Write command from Host C */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
    SPDK_CU_ASSERT_FATAL(rc < 0);
    SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

    /* Test Case: Issue a Read command from Host B */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);

    /* Unregister Host C */
    memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

    /* Test Case: Read and Write commands from non-registrant Host C */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
    SPDK_CU_ASSERT_FATAL(rc < 0);
    SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);
}

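/* Exclusive Access: only the reservation holder may read or write, but any
 * registrant may still issue a Reservation Release.
 */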
static void
test_reservation_exclusive_access(void)
{
    struct spdk_nvmf_request req = {};
    union nvmf_h2c_msg cmd = {};
    union nvmf_c2h_msg rsp = {};
    int rc;

    req.cmd = &cmd;
    req.rsp = &rsp;

    /* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
    ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
    g_ns_info.holder_id = g_ctrlr1_A.hostid;

    /* Test Case: Issue a Read command from Host B */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
    SPDK_CU_ASSERT_FATAL(rc < 0);
    SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

    /* Test Case: Issue a Reservation Release command from a valid Registrant */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
    struct spdk_nvmf_request req = {};
    union nvmf_h2c_msg cmd = {};
    union nvmf_c2h_msg rsp = {};
    int rc;

    req.cmd = &cmd;
    req.rsp = &rsp;

    /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
    ut_reservation_init(rtype);
    g_ns_info.holder_id = g_ctrlr1_A.hostid;

    /* Test Case: Issue a Read command from Host A and Host C */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);

    /* Test Case: Issue a DSM Write command from Host A and Host C */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);

    /* Unregister Host C */
    memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

    /* Test Case: Read and Write commands from non-registrant Host C */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
    SPDK_CU_ASSERT_FATAL(rc < 0);
    SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
    _test_reservation_write_exclusive_regs_only_and_all_regs(
        SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
    _test_reservation_write_exclusive_regs_only_and_all_regs(
        SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

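/* For the REG_ONLY and ALL_REGS variants of Exclusive Access, access rights
 * extend to every registrant and are lost once a host unregisters.
 */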
static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
    struct spdk_nvmf_request req = {};
    union nvmf_h2c_msg cmd = {};
    union nvmf_c2h_msg rsp = {};
    int rc;

    req.cmd = &cmd;
    req.rsp = &rsp;

    /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
    ut_reservation_init(rtype);
    g_ns_info.holder_id = g_ctrlr1_A.hostid;

    /* Test Case: Issue a Write command from Host B */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
    SPDK_CU_ASSERT_FATAL(rc == 0);

    /* Unregister Host B */
    memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

    /* Test Case: Issue a Read command from Host B */
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
    SPDK_CU_ASSERT_FATAL(rc < 0);
    SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
    cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
    rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
    SPDK_CU_ASSERT_FATAL(rc < 0);
    SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
    _test_reservation_exclusive_access_regs_only_and_all_regs(
        SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
    _test_reservation_exclusive_access_regs_only_and_all_regs(
        SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

static void
init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
    STAILQ_INIT(&ctrlr->async_events);
}

static void
cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
    struct spdk_nvmf_async_event_completion *event, *event_tmp;

    STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
        STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
        free(event);
    }
}

static int
num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
    int num = 0;
    struct spdk_nvmf_async_event_completion *event;

    STAILQ_FOREACH(event, &ctrlr->async_events, link) {
        num++;
    }
    return num;
}

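/* Reservation notification log: masked event types generate no log pages,
 * while unmasked events queue log pages and complete an outstanding AER
 * pointing at the reservation notification log.
 */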
static void
test_reservation_notification_log_page(void)
{
    struct spdk_nvmf_ctrlr ctrlr;
    struct spdk_nvmf_qpair qpair;
    struct spdk_nvmf_ns ns;
    struct spdk_nvmf_request req = {};
    union nvmf_h2c_msg cmd = {};
    union nvmf_c2h_msg rsp = {};
    union spdk_nvme_async_event_completion event = {};
    struct spdk_nvme_reservation_notification_log logs[3];
    struct iovec iov;

    memset(&ctrlr, 0, sizeof(ctrlr));
    ctrlr.thread = spdk_get_thread();
    TAILQ_INIT(&ctrlr.log_head);
    init_pending_async_events(&ctrlr);
    ns.nsid = 1;

    /* Test Case: Mask all the reservation notifications */
    ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
              SPDK_NVME_RESERVATION_RELEASED_MASK |
              SPDK_NVME_RESERVATION_PREEMPTED_MASK;
    nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
                                      SPDK_NVME_REGISTRATION_PREEMPTED);
    nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
                                      SPDK_NVME_RESERVATION_RELEASED);
    nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
                                      SPDK_NVME_RESERVATION_PREEMPTED);
    poll_threads();
    SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));

    /* Test Case: Unmask all the reservation notifications,
     * 3 log pages are generated, and AER was triggered.
     */
    ns.mask = 0;
    ctrlr.num_avail_log_pages = 0;
    req.cmd = &cmd;
    req.rsp = &rsp;
    ctrlr.aer_req[0] = &req;
    ctrlr.nr_aer_reqs = 1;
    req.qpair = &qpair;
    TAILQ_INIT(&qpair.outstanding);
    qpair.ctrlr = NULL;
    qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
    TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);

    nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
                                      SPDK_NVME_REGISTRATION_PREEMPTED);
    nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
                                      SPDK_NVME_RESERVATION_RELEASED);
    nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
                                      SPDK_NVME_RESERVATION_PREEMPTED);
    poll_threads();
    event.raw = rsp.nvme_cpl.cdw0;
    SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
    SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
    SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
    SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);

    /* Test Case: Get Log Page to clear the log pages */
    iov.iov_base = &logs[0];
    iov.iov_len = sizeof(logs);
    nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
    SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);

    cleanup_pending_async_events(&ctrlr);
}

static void
test_get_dif_ctx(void)
{
    struct spdk_nvmf_subsystem subsystem = {};
    struct spdk_nvmf_request req = {};
    struct spdk_nvmf_qpair qpair = {};
    struct spdk_nvmf_ctrlr ctrlr = {};
    struct spdk_nvmf_ns ns = {};
    struct spdk_nvmf_ns *_ns = NULL;
    struct spdk_bdev bdev = {};
    union nvmf_h2c_msg cmd = {};
    struct spdk_dif_ctx dif_ctx = {};
    bool ret;

    ctrlr.subsys = &subsystem;

    qpair.ctrlr = &ctrlr;

    req.qpair = &qpair;
    req.cmd = &cmd;

    ns.bdev = &bdev;

    ctrlr.dif_insert_or_strip = false;

    ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
    CU_ASSERT(ret == false);

    ctrlr.dif_insert_or_strip = true;
    qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;

    ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
    CU_ASSERT(ret == false);

    qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
    cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;

    ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
    CU_ASSERT(ret == false);

    cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;

    ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
    CU_ASSERT(ret == false);

    qpair.qid = 1;

    ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
    CU_ASSERT(ret == false);

    cmd.nvme_cmd.nsid = 1;

    ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
    CU_ASSERT(ret == false);

    subsystem.max_nsid = 1;
    subsystem.ns = &_ns;
    subsystem.ns[0] = &ns;

    ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
    CU_ASSERT(ret == false);

    cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;

    ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
    CU_ASSERT(ret == true);
}

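/* IOCCSZ is reported in 16-byte units: one 64-byte SQE plus the transport's
 * in-capsule data size.
 */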
	/* Check ioccsz, TCP transport */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, RDMA transport */
	tops.type = SPDK_NVME_TRANSPORT_RDMA;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, TCP transport with dif_insert_or_strip */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	ctrlr.dif_insert_or_strip = true;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
}

static int
custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
{
	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static void
test_custom_admin_cmd(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];
	int rc;

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = 0xc1;
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));

	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);

	/* Ensure that our handler is being called */
	rc = nvmf_ctrlr_process_admin_cmd(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
}

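/*
 * Fused compare-and-write: per the NVMe base specification, the two halves
 * of a fused operation execute as one atomic unit.  The first command must
 * carry FUSE_FIRST (COMPARE here) and the second FUSE_SECOND (WRITE here),
 * and the WRITE is performed only if the COMPARE succeeds.  The cases below
 * check the sequencing rules; out-of-order or mismatched opcodes must fail.
 */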
static void
test_fused_compare_and_write(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;

	/* SUCCESS/SUCCESS */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req == NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Wrong sequence */
	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* Write as FUSE_FIRST (Wrong op code) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* Compare as FUSE_SECOND (Wrong op code) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);
}

static void
test_multi_async_event_reqs(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[5] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[5] = {};
	union nvmf_c2h_msg rsp[5] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};

	int i;

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

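	/*
	 * Prepare five AER admin commands.  Only SPDK_NVMF_MAX_ASYNC_EVENTS
	 * of them can be kept outstanding by the controller; the fifth is
	 * used below to exercise the limit-exceeded error path.
	 */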
	for (i = 0; i < 5; i++) {
		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
		cmd[i].nvme_cmd.nsid = 1;
		cmd[i].nvme_cmd.cid = i;

		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];
		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
	}

	/* The target can store SPDK_NVMF_MAX_ASYNC_EVENTS requests */
	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
	}
	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);

	/* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);

	/* Verify that aer_req[] stays contiguous when a request in the middle is aborted */
	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);

	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
	CU_ASSERT(ctrlr.aer_req[2] == NULL);
	CU_ASSERT(ctrlr.nr_aer_reqs == 2);

	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
}

static void
test_get_ana_log_page_one_ns_per_anagrp(void)
{
#define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
#define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
	uint32_t ana_group[3];
	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
	struct spdk_nvmf_ctrlr ctrlr = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
	uint64_t offset;
	uint32_t length;
	int i;
	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	struct iovec iov, iovs[2];
	struct spdk_nvme_ana_page *ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;

	subsystem.ns = ns_arr;
	subsystem.max_nsid = 3;
	for (i = 0; i < 3; i++) {
		subsystem.ana_group[i] = 1;
	}
	ctrlr.subsys = &subsystem;
	ctrlr.listener = &listener;

	for (i = 0; i < 3; i++) {
		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	}

	for (i = 0; i < 3; i++) {
		ns_arr[i]->nsid = i + 1;
		ns_arr[i]->anagrpid = i + 1;
	}

	/* Create the expected log page. */
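	/*
	 * Layout of the ANA log page: a struct spdk_nvme_ana_page header
	 * followed by one spdk_nvme_ana_group_descriptor per ANA group, each
	 * descriptor immediately followed by its NSID list.  Here there are
	 * three groups with one namespace each, matching UT_ANA_DESC_SIZE
	 * and UT_ANA_LOG_PAGE_SIZE above.
	 */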
	ana_hdr = (void *)&expected_page[0];
	ana_hdr->num_ana_group_desc = 3;
	ana_hdr->change_count = 0;

	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
	ana_desc = (void *)_ana_desc;
	offset = sizeof(struct spdk_nvme_ana_page);

	for (i = 0; i < 3; i++) {
		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
		ana_desc->ana_group_id = ns_arr[i]->nsid;
		ana_desc->num_of_nsid = 1;
		ana_desc->change_count = 0;
		ana_desc->ana_state = ctrlr.listener->ana_state[i];
		ana_desc->nsid[0] = ns_arr[i]->nsid;
		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
		offset += UT_ANA_DESC_SIZE;
	}

	/* Read the entire actual log page. */
	offset = 0;
	while (offset < UT_ANA_LOG_PAGE_SIZE) {
		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
		iov.iov_base = &actual_page[offset];
		iov.iov_len = length;
		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
		offset += length;
	}

	/* Compare the expected page and the actual page. */
	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
	offset = 0;
	iovs[0].iov_base = &actual_page[offset];
	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
	iovs[1].iov_base = &actual_page[offset];
	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);

	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

#undef UT_ANA_DESC_SIZE
#undef UT_ANA_LOG_PAGE_SIZE
}

static void
test_get_ana_log_page_multi_ns_per_anagrp(void)
{
#define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + \
			      sizeof(struct spdk_nvme_ana_group_descriptor) * 2 + \
			      sizeof(uint32_t) * 5)
	struct spdk_nvmf_ns ns[5];
	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
	uint32_t ana_group[5] = {0};
	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
	enum spdk_nvme_ana_state ana_state[5];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	struct iovec iov, iovs[2];
	struct spdk_nvme_ana_page *ana_hdr;
	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	uint64_t offset;
	uint32_t length;
	int i;

	subsystem.max_nsid = 5;
	subsystem.ana_group[1] = 3;
	subsystem.ana_group[2] = 2;
	for (i = 0; i < 5; i++) {
		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	}

	for (i = 0; i < 5; i++) {
		ns_arr[i]->nsid = i + 1;
	}
	ns_arr[0]->anagrpid = 2;
	ns_arr[1]->anagrpid = 3;
	ns_arr[2]->anagrpid = 2;
	ns_arr[3]->anagrpid = 3;
	ns_arr[4]->anagrpid = 2;

	/* Create the expected page. */
	ana_hdr = (void *)&expected_page[0];
	ana_hdr->num_ana_group_desc = 2;
	ana_hdr->change_count = 0;

	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
	ana_desc = (void *)_ana_desc;
	offset = sizeof(struct spdk_nvme_ana_page);

	memset(_ana_desc, 0, sizeof(_ana_desc));
	ana_desc->ana_group_id = 2;
	ana_desc->num_of_nsid = 3;
	ana_desc->change_count = 0;
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->nsid[0] = 1;
	ana_desc->nsid[1] = 3;
	ana_desc->nsid[2] = 5;
	memcpy(&expected_page[offset], ana_desc,
	       sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3);
	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;

	memset(_ana_desc, 0, sizeof(_ana_desc));
	ana_desc->ana_group_id = 3;
	ana_desc->num_of_nsid = 2;
	ana_desc->change_count = 0;
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->nsid[0] = 2;
	ana_desc->nsid[1] = 4;
	memcpy(&expected_page[offset], ana_desc,
	       sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 2);

	/* Read the entire actual log page, and compare the expected page and the actual page. */
	offset = 0;
	while (offset < UT_ANA_LOG_PAGE_SIZE) {
		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
		iov.iov_base = &actual_page[offset];
		iov.iov_len = length;
		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
		offset += length;
	}

	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
	offset = 0;
	iovs[0].iov_base = &actual_page[offset];
	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
	iovs[1].iov_base = &actual_page[offset];
	iovs[1].iov_len = sizeof(uint32_t) * 5;
	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);

	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

#undef UT_ANA_LOG_PAGE_SIZE
}

static void
test_multi_async_events(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[4] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[4] = {};
	union nvmf_c2h_msg rsp[4] = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	int i;

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();
	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
	init_pending_async_events(&ctrlr);

	/* The target queues pending events when there is no outstanding AER request */
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);

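	/*
	 * Three events are now pending, so the first three AER requests
	 * below complete immediately, each consuming one queued event; the
	 * fourth finds nothing to report and stays outstanding.
	 */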
	for (i = 0; i < 4; i++) {
		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
		cmd[i].nvme_cmd.nsid = 1;
		cmd[i].nvme_cmd.cid = i;

		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];

		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);

		sgroups.mgmt_io_outstanding = 1;
		if (i < 3) {
			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
		} else {
			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
		}
	}

	event.raw = rsp[0].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
	event.raw = rsp[1].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
	event.raw = rsp[2].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);

	cleanup_pending_async_events(&ctrlr);
}

static void
test_rae(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[3] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[3] = {};
	union nvmf_c2h_msg rsp[3] = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	int i;
	char data[4096];

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();
	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
	init_pending_async_events(&ctrlr);

	/* The target queues pending events when there is no outstanding AER request */
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	/* Only one event is queued until RAE is cleared */
	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);

	req[0].qpair = &qpair;
	req[0].cmd = &cmd[0];
	req[0].rsp = &rsp[0];
	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
	cmd[0].nvme_cmd.nsid = 1;
	cmd[0].nvme_cmd.cid = 0;

	for (i = 1; i < 3; i++) {
		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];
		req[i].data = &data;
		req[i].length = sizeof(data);

		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_CHANGED_NS_LIST;
		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req[i].length);
		cmd[i].nvme_cmd.cid = i;
	}
	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;

	/* Consume the pending event */
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	event.raw = rsp[0].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);

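	/*
	 * The RAE (Retain Asynchronous Event) bit of Get Log Page controls
	 * re-arming: reading the log page with RAE set leaves the event
	 * condition masked, so further notices queue nothing; reading it
	 * with RAE clear also clears the condition, and the next notice
	 * queues a fresh event.
	 */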
	/* Get log with RAE set */
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* No new event is generated until RAE is cleared */
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);

	/* Get log with RAE clear */
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);

	cleanup_pending_async_events(&ctrlr);
}

static void
test_nvmf_ctrlr_create_destruct(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr *ctrlr = NULL;
	struct spdk_nvmf_tgt tgt = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";

	group.thread = spdk_get_thread();
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.opts.dif_insert_or_strip = true;
	transport.tgt = &tgt;
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	group.sgroups = sgroups;

	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	req.qpair = &qpair;
	req.length = sizeof(connect_data);
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.cmd = &cmd;
	req.rsp = &rsp;

	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	sgroups[subsystem.id].mgmt_io_outstanding++;

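	/*
	 * nvmf_ctrlr_create() defers part of the setup to the subsystem and
	 * poll group threads, so poll_threads() has to run before the new
	 * controller's state can be asserted on.
	 */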
	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.data);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
	CU_ASSERT(req.qpair->ctrlr == ctrlr);
	CU_ASSERT(ctrlr->subsys == &subsystem);
	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
	CU_ASSERT(ctrlr->disconnect_in_progress == false);
	CU_ASSERT(ctrlr->qpair_mask != NULL);
	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
	/* hostid is binary data, so compare all 16 bytes with memcmp */
	CU_ASSERT(!memcmp(&ctrlr->hostid, hostid, 16));
	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
	CU_ASSERT(ctrlr->dif_insert_or_strip == true);

	ctrlr->in_destruct = true;
	nvmf_ctrlr_destruct(ctrlr);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
}

static void
test_nvmf_ctrlr_use_zcopy(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	struct spdk_bdev bdev = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};
	int opc;

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	ctrlr.subsys = &subsystem;

	transport.opts.zcopy = true;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	qpair.transport = &transport;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

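	/*
	 * nvmf_ctrlr_use_zcopy() accepts a request only when every condition
	 * holds: an I/O (non-admin) queue, a non-fused READ or WRITE, a valid
	 * NSID backed by a zcopy-capable bdev, and zcopy enabled at the
	 * transport level.  Each case below violates exactly one condition.
	 */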
	/* Admin queue */
	qpair.qid = 0;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	qpair.qid = 1;

	/* Invalid opcodes */
	for (opc = 0; opc <= 255; opc++) {
		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode)opc;
		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
		}
	}
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* Fused WRITE */
	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;

	/* NSID with no backing namespace */
	cmd.nvme_cmd.nsid = 4;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	cmd.nvme_cmd.nsid = 1;

	/* Zcopy not supported by the namespace */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	ns.zcopy = true;

	/* Zcopy disabled at the transport level */
	transport.opts.zcopy = false;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	transport.opts.zcopy = true;

	/* Success */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
}

static void
qpair_state_change_done(void *cb_arg, int status)
{
}

static void
test_spdk_nvmf_request_zcopy_start(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512 };

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	transport.opts.zcopy = true;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.transport = &transport;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
	cmd.opc = SPDK_NVME_OPC_READ;

	/* Fail because there is no controller */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	qpair.ctrlr = NULL;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	qpair.ctrlr = &ctrlr;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because of a bad NSID */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	cmd.nsid = 0;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	cmd.nsid = 1;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because of a bad channel */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	ns_info.channel = NULL;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	ns_info.channel = &io_ch;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Queue the request because the NSID is not active */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	TAILQ_REMOVE(&sgroups.queued, &req, link);
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because the qpair is not active */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	qpair.state_cb = qpair_state_change_done;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	qpair.state_cb = NULL;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
	cmd.cdw12 = 100;		/* NLB: CDW12 bits 15:00, 0's based */
	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	cmd.cdw10 = 0;
	cmd.cdw12 = 0;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Success */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
}

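/*
 * A zcopy request moves through three phases: spdk_nvmf_request_zcopy_start()
 * borrows buffers from the bdev (INIT -> EXECUTE), the transport transfers
 * data in place, and spdk_nvmf_request_zcopy_end() releases the buffers
 * (-> COMPLETE), committing them on write.  The bdev layer is stubbed here,
 * so the zcopy_start_bdev_io_* markers stand in for real bdev_io pointers.
 */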
static void
test_zcopy_read(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512 };

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	transport.opts.zcopy = true;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.transport = &transport;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	cmd.opc = SPDK_NVME_OPC_READ;

	/* Prepare for zcopy */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);

	/* Perform the zcopy start */
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
	CU_ASSERT(qpair.outstanding.tqh_first == &req);
	CU_ASSERT(ns_info.io_outstanding == 1);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Perform the zcopy end */
	spdk_nvmf_request_zcopy_end(&req, false);
	CU_ASSERT(req.zcopy_bdev_io == NULL);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
}

static void
test_zcopy_write(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512 };

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	transport.opts.zcopy = true;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.transport = &transport;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	/* Prepare for zcopy */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);

	/* Perform the zcopy start */
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
	CU_ASSERT(qpair.outstanding.tqh_first == &req);
	CU_ASSERT(ns_info.io_outstanding == 1);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Perform the zcopy end */
	spdk_nvmf_request_zcopy_end(&req, true);
	CU_ASSERT(req.zcopy_bdev_io == NULL);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
}

static void
test_nvmf_property_set(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	req.qpair = &qpair;
	qpair.ctrlr = &ctrlr;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Invalid parameters */
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);

	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);

	/* Set cc with same property size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate cc data */
	ctrlr.vcprop.cc.raw = 0xDEADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);

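	/*
	 * asq is an 8-byte property; accessing it with SPDK_NVMF_PROP_SIZE_4
	 * touches only the low dword, which is why 0xAADDADBEEF reads back
	 * as 0xDDADBEEF below.
	 */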
	/* Set asq with different property size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate asq data */
	ctrlr.vcprop.asq = 0xAADDADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
}

static void
test_nvmf_ctrlr_get_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior *host_behavior;
	struct spdk_nvme_host_behavior behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Invalid data */
	req.data = NULL;
	req.length = sizeof(struct spdk_nvme_host_behavior);

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(req.data == NULL);

	/* Wrong structure length */
	req.data = &behavior;
	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Features Host Behavior Support success */
	req.data = &behavior;
	req.length = sizeof(struct spdk_nvme_host_behavior);
	ctrlr.acre_enabled = true;
	host_behavior = (struct spdk_nvme_host_behavior *)req.data;
	host_behavior->acre = false;

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(host_behavior->acre == true);
}

static void
test_nvmf_ctrlr_set_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior host_behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.iov[0].iov_base = &host_behavior;
	req.iov[0].iov_len = sizeof(host_behavior);

	/* Invalid iovcnt */
	req.iovcnt = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Invalid iov_len */
	req.iovcnt = 1;
	req.iov[0].iov_len = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

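	/*
	 * acre (Advanced Command Retry Enable) is a one-byte field in the
	 * host behavior support structure: 0 disables it, 1 enables it, and
	 * any other value must be rejected.  The three cases below cover each.
	 */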
	/* acre is false */
	host_behavior.acre = 0;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == false);

	/* acre is true */
	host_behavior.acre = 1;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == true);

	/* Invalid acre */
	host_behavior.acre = 2;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);
	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);
	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
	CU_ADD_TEST(suite, test_multi_async_events);
	CU_ADD_TEST(suite, test_rae);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
	CU_ADD_TEST(suite, test_zcopy_read);
	CU_ADD_TEST(suite, test_zcopy_write);
	CU_ADD_TEST(suite, test_nvmf_property_set);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}