/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/bdev_zone.h"
#include "spdk/nvme_spec.h"
#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
	bool zoned;
	uint32_t zone_size;
	uint32_t max_open_zones;
	uint32_t max_active_zones;
};

#define MAX_OPEN_ZONES		12
#define MAX_ACTIVE_ZONES	34
#define ZONE_SIZE		56

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;
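
/*
 * Everything from here down to the first hand-written function stubs out the
 * dependencies pulled in by nvmf/ctrlr.c. DEFINE_STUB()/DEFINE_STUB_V() (from
 * spdk_internal/mock.h) generate stubs that return the given default value;
 * individual tests override a stub's return value where needed, e.g.
 * MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem) makes the stub return
 * &subsystem until the matching MOCK_CLEAR().
 */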

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
	    MAX_ACTIVE_ZONES);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);
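
/*
 * The mocks below need real behavior (iterating namespaces, filling in
 * identify data, handing back the zcopy sentinel pointers), so they are
 * written out by hand instead of using DEFINE_STUB().
 */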

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}
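
/*
 * Get Log Page: exercises nvmf_ctrlr_get_log_page() with a valid request,
 * an invalid log ID, an offset that is not dword-aligned, and a request
 * without a data buffer. All four must complete inline.
 */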
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);
	spdk_iov_one(req.iov, &req.iovcnt, &data, req.length);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	req.iovcnt = 0;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}
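
/*
 * Connect: builds a minimal fabric around nvmf_ctrlr_cmd_connect() - one
 * poll group, one subsystem, and a transport limited to max_aq_depth 32,
 * max_queue_depth 64 and max_qpairs_per_ctrlr 3 - then walks through valid
 * admin connects followed by the failure paths.
 */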
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.length = sizeof(connect_data);
	spdk_iov_one(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;
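
	/*
	 * Each failure case below flips one field, issues the connect, checks
	 * the completion and restores the field. For invalid-parameter
	 * failures the response carries IATTR/IPO: IATTR 0 means IPO is an
	 * offset into the connect SQE (42 = QID, 44 = SQSIZE), IATTR 1 an
	 * offset into the connect data (16 = CNTLID, 256 = SUBNQN,
	 * 512 = HOSTNQN).
	 */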

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;
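
	/*
	 * SQSIZE is 0-based, so 31 is the largest legal admin value with
	 * max_aq_depth 32 and 63 the largest I/O value with max_queue_depth
	 * 64; the next two cases go one past each limit.
	 */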

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
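
	/*
	 * Discovery subsystems only accept admin queues: an I/O connect
	 * (qid 1) must fail with IPO 42 (the QID field), while admin connects
	 * succeed and always arm the keep-alive timer.
	 */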

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* I/O connect to discovery controller with keep-alive-timeout == 0.
	 * Then, a fixed timeout value is set to keep-alive-timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;
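
	/*
	 * CC.IOSQES/IOCQES hold log2 of the queue entry sizes; a fabrics
	 * controller requires 6 (64-byte SQE) and 4 (16-byte CQE), so the
	 * value 3 used above is rejected at I/O connect time.
	 */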

	/* I/O connect with qid that is too large */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect duplicate QID and schedule a retry. So for
	 * now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now advance the clock, so that the retry poller executes. */
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with temporarily duplicate queue ID. This covers race
	 * where qpair_mask bit may not yet be cleared, even though initiator
	 * has closed the connection. See issue #2955.
	 */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect duplicate QID and schedule a retry. So for
	 * now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now advance the clock, so that the retry poller executes. */
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}
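
/*
 * NS identification descriptor list (Identify CNS 03h): each descriptor is
 * a 4-byte header (NIDT, NIDL, two reserved bytes) followed by NIDL bytes
 * of ID, which is why the payload bytes land at offsets 4, 16 and 36 below.
 */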
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);
	spdk_iov_one(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}
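
/*
 * Identify Namespace: active NSIDs return bdev-backed data, an allocated
 * but inactive NSID (no bdev) returns zeroed data with SUCCESS, and NSIDs
 * outside [1, max_nsid] - including the 0xFFFFFFFF broadcast value, since
 * namespace management is not supported - fail with
 * INVALID_NAMESPACE_OR_FORMAT.
 */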
static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}
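
/*
 * I/O-command-set-specific Identify Namespace: for the ZNS namespace the
 * reported MAR/MOR fields are 0-based, hence the MAX_*_ZONES - 1 checks;
 * for the plain NVM namespace the payload must stay all zeroes.
 */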
static void
test_identify_ns_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ns_data nsdata = {};
	struct spdk_bdev bdev[2] = {
		{
			.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE,
			.max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES
		},
		{.blockcnt = 5678}
	};
	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* Invalid ZNS NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid ZNS NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1);
	CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1);
	CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1);
	CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE);
	nsdata.ozcs.read_across_zone_boundaries = 0;
	nsdata.mar = 0;
	nsdata.mor = 0;
	nsdata.lbafe[0].zsze = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* Valid NVM NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NVM NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}
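
/*
 * Set/Get Features: covers reservation persistence (PTPL), temperature
 * threshold with valid and reserved TMPSEL/THSEL encodings, and error
 * recovery. Reserved encodings must fail with INVALID_FIELD.
 */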
static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
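
	/*
	 * Note: DULBE (Deallocated or Unwritten Logical Block Error) cannot
	 * be enabled here - the set with dulbe = 1 above fails with
	 * INVALID_FIELD, while clearing it below is accepted.
	 */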

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *
 *          --------             --------        --------
 *         | Host A |           | Host B |      | Host C |
 *          --------             --------        --------
 *         /        \               |                |
 *   --------    --------        -------          -------
 *  |Ctrlr1_A|  |Ctrlr2_A|      |Ctrlr_B|        |Ctrlr_C|
 *   --------    --------        -------          -------
 *     \            \              /                /
 *      \            \            /                /
 *       \            \          /                /
 *       ----------------------------------------
 *       |              NAMESPACE 1             |
 *       ----------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}
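
/*
 * Write Exclusive: only the holder (Host A) may write; other registrants
 * and non-registrants may still read, and writes from anyone else must
 * complete with RESERVATION_CONFLICT.
 */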
static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}
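
/*
 * For the registrants-only and all-registrants variants every registrant
 * shares the reservation, so Host B's I/O succeeds until it unregisters,
 * after which both reads and writes must conflict.
 */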
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1456 { 1457 struct spdk_nvmf_request req = {}; 1458 union nvmf_h2c_msg cmd = {}; 1459 union nvmf_c2h_msg rsp = {}; 1460 int rc; 1461 1462 req.cmd = &cmd; 1463 req.rsp = &rsp; 1464 1465 /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */ 1466 ut_reservation_init(rtype); 1467 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1468 1469 /* Test Case: Issue a Write command from Host B */ 1470 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1471 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1472 SPDK_CU_ASSERT_FATAL(rc == 0); 1473 1474 /* Unregister Host B */ 1475 spdk_uuid_set_null(&g_ns_info.reg_hostid[1]); 1476 1477 /* Test Case: Issue a Read command from Host B */ 1478 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1479 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1480 SPDK_CU_ASSERT_FATAL(rc < 0); 1481 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1482 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1483 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1484 SPDK_CU_ASSERT_FATAL(rc < 0); 1485 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1486 } 1487 1488 static void 1489 test_reservation_exclusive_access_regs_only_and_all_regs(void) 1490 { 1491 _test_reservation_exclusive_access_regs_only_and_all_regs( 1492 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY); 1493 _test_reservation_exclusive_access_regs_only_and_all_regs( 1494 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 1495 } 1496 1497 static void 1498 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1499 { 1500 STAILQ_INIT(&ctrlr->async_events); 1501 } 1502 1503 static void 1504 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1505 { 1506 struct spdk_nvmf_async_event_completion *event, *event_tmp; 1507 1508 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) { 1509 STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link); 1510 free(event); 1511 } 1512 } 1513 1514 static int 1515 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1516 { 1517 int num = 0; 1518 struct spdk_nvmf_async_event_completion *event; 1519 1520 STAILQ_FOREACH(event, &ctrlr->async_events, link) { 1521 num++; 1522 } 1523 return num; 1524 } 1525 1526 static void 1527 test_reservation_notification_log_page(void) 1528 { 1529 struct spdk_nvmf_ctrlr ctrlr; 1530 struct spdk_nvmf_qpair qpair; 1531 struct spdk_nvmf_ns ns; 1532 struct spdk_nvmf_request req = {}; 1533 union nvmf_h2c_msg cmd = {}; 1534 union nvmf_c2h_msg rsp = {}; 1535 union spdk_nvme_async_event_completion event = {}; 1536 struct spdk_nvme_reservation_notification_log logs[3]; 1537 struct iovec iov; 1538 1539 memset(&ctrlr, 0, sizeof(ctrlr)); 1540 ctrlr.thread = spdk_get_thread(); 1541 TAILQ_INIT(&ctrlr.log_head); 1542 init_pending_async_events(&ctrlr); 1543 ns.nsid = 1; 1544 1545 /* Test Case: Mask all the reservation notifications */ 1546 ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK | 1547 SPDK_NVME_RESERVATION_RELEASED_MASK | 1548 SPDK_NVME_RESERVATION_PREEMPTED_MASK; 1549 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1550 SPDK_NVME_REGISTRATION_PREEMPTED); 1551 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1552 SPDK_NVME_RESERVATION_RELEASED); 1553 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1554 SPDK_NVME_RESERVATION_PREEMPTED); 1555 poll_threads(); 1556 
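/* All three notice types were masked above, so no log page may have been queued. */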
SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head)); 1557 1558 /* Test Case: Unmask all the reservation notifications, 1559 * 3 log pages are generated, and AER was triggered. 1560 */ 1561 ns.mask = 0; 1562 ctrlr.num_avail_log_pages = 0; 1563 req.cmd = &cmd; 1564 req.rsp = &rsp; 1565 ctrlr.aer_req[0] = &req; 1566 ctrlr.nr_aer_reqs = 1; 1567 req.qpair = &qpair; 1568 TAILQ_INIT(&qpair.outstanding); 1569 qpair.ctrlr = NULL; 1570 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1571 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 1572 1573 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1574 SPDK_NVME_REGISTRATION_PREEMPTED); 1575 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1576 SPDK_NVME_RESERVATION_RELEASED); 1577 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1578 SPDK_NVME_RESERVATION_PREEMPTED); 1579 poll_threads(); 1580 event.raw = rsp.nvme_cpl.cdw0; 1581 SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO); 1582 SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL); 1583 SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION); 1584 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3); 1585 1586 /* Test Case: Get Log Page to clear the log pages */ 1587 iov.iov_base = &logs[0]; 1588 iov.iov_len = sizeof(logs); 1589 nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0); 1590 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0); 1591 1592 cleanup_pending_async_events(&ctrlr); 1593 } 1594 1595 static void 1596 test_get_dif_ctx(void) 1597 { 1598 struct spdk_nvmf_subsystem subsystem = {}; 1599 struct spdk_nvmf_request req = {}; 1600 struct spdk_nvmf_qpair qpair = {}; 1601 struct spdk_nvmf_ctrlr ctrlr = {}; 1602 struct spdk_nvmf_ns ns = {}; 1603 struct spdk_nvmf_ns *_ns = NULL; 1604 struct spdk_bdev bdev = {}; 1605 union nvmf_h2c_msg cmd = {}; 1606 struct spdk_dif_ctx dif_ctx = {}; 1607 bool ret; 1608 1609 ctrlr.subsys = &subsystem; 1610 1611 qpair.ctrlr = &ctrlr; 1612 1613 req.qpair = &qpair; 1614 req.cmd = &cmd; 1615 1616 ns.bdev = &bdev; 1617 1618 ctrlr.dif_insert_or_strip = false; 1619 1620 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1621 CU_ASSERT(ret == false); 1622 1623 ctrlr.dif_insert_or_strip = true; 1624 qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED; 1625 1626 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1627 CU_ASSERT(ret == false); 1628 1629 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1630 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 1631 1632 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1633 CU_ASSERT(ret == false); 1634 1635 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH; 1636 1637 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1638 CU_ASSERT(ret == false); 1639 1640 qpair.qid = 1; 1641 1642 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1643 CU_ASSERT(ret == false); 1644 1645 cmd.nvme_cmd.nsid = 1; 1646 1647 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1648 CU_ASSERT(ret == false); 1649 1650 subsystem.max_nsid = 1; 1651 subsystem.ns = &_ns; 1652 subsystem.ns[0] = &ns; 1653 1654 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1655 CU_ASSERT(ret == false); 1656 1657 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE; 1658 1659 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1660 CU_ASSERT(ret == true); 1661 } 1662 1663 static void 1664 test_identify_ctrlr(void) 1665 { 1666 struct spdk_nvmf_tgt tgt = {}; 1667 struct spdk_nvmf_subsystem subsystem = { 1668 .subtype = SPDK_NVMF_SUBTYPE_NVME, 1669 .tgt = &tgt, 1670 }; 1671 
struct spdk_nvmf_transport_ops tops = {}; 1672 struct spdk_nvmf_transport transport = { 1673 .ops = &tops, 1674 .opts = { 1675 .in_capsule_data_size = 4096, 1676 }, 1677 }; 1678 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 1679 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 1680 struct spdk_nvme_ctrlr_data cdata = {}; 1681 uint32_t expected_ioccsz; 1682 1683 nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata); 1684 1685 /* Check ioccsz, TCP transport */ 1686 tops.type = SPDK_NVME_TRANSPORT_TCP; 1687 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1688 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1689 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1690 1691 /* Check ioccsz, RDMA transport */ 1692 tops.type = SPDK_NVME_TRANSPORT_RDMA; 1693 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1694 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1695 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1696 1697 /* Check ioccsz, TCP transport with dif_insert_or_strip */ 1698 tops.type = SPDK_NVME_TRANSPORT_TCP; 1699 ctrlr.dif_insert_or_strip = true; 1700 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1701 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1702 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1703 } 1704 1705 static void 1706 test_identify_ctrlr_iocs_specific(void) 1707 { 1708 struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 }; 1709 struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 }; 1710 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop }; 1711 struct spdk_nvme_cmd cmd = {}; 1712 struct spdk_nvme_cpl rsp = {}; 1713 struct spdk_nvme_zns_ctrlr_data ctrlr_data = {}; 1714 1715 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS; 1716 1717 /* ZNS max_zone_append_size_kib no limit */ 1718 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1719 memset(&rsp, 0, sizeof(rsp)); 1720 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1721 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1722 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1723 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1724 CU_ASSERT(ctrlr_data.zasl == 0); 1725 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1726 1727 /* ZNS max_zone_append_size_kib = 4096 */ 1728 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1729 memset(&rsp, 0, sizeof(rsp)); 1730 subsystem.max_zone_append_size_kib = 4096; 1731 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1732 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1733 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1734 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1735 CU_ASSERT(ctrlr_data.zasl == 0); 1736 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1737 1738 /* ZNS max_zone_append_size_kib = 60000 */ 1739 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1740 memset(&rsp, 0, sizeof(rsp)); 1741 subsystem.max_zone_append_size_kib = 60000; 1742 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1743 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1744 
CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1745 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1746 CU_ASSERT(ctrlr_data.zasl == 3); 1747 ctrlr_data.zasl = 0; 1748 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1749 1750 /* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */ 1751 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1752 memset(&rsp, 0, sizeof(rsp)); 1753 ctrlr.vcprop.cap.bits.mpsmin = 2; 1754 subsystem.max_zone_append_size_kib = 60000; 1755 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1756 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1757 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1758 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1759 CU_ASSERT(ctrlr_data.zasl == 1); 1760 ctrlr_data.zasl = 0; 1761 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1762 ctrlr.vcprop.cap.bits.mpsmin = 0; 1763 1764 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM; 1765 1766 /* NVM */ 1767 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1768 memset(&rsp, 0, sizeof(rsp)); 1769 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1770 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1771 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1772 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1773 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1774 } 1775 1776 static int 1777 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req) 1778 { 1779 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 1780 1781 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1782 } 1783 1784 static void 1785 test_custom_admin_cmd(void) 1786 { 1787 struct spdk_nvmf_subsystem subsystem; 1788 struct spdk_nvmf_qpair qpair; 1789 struct spdk_nvmf_ctrlr ctrlr; 1790 struct spdk_nvmf_request req; 1791 struct spdk_nvmf_ns *ns_ptrs[1]; 1792 struct spdk_nvmf_ns ns; 1793 union nvmf_h2c_msg cmd; 1794 union nvmf_c2h_msg rsp; 1795 struct spdk_bdev bdev; 1796 uint8_t buf[4096]; 1797 int rc; 1798 1799 memset(&subsystem, 0, sizeof(subsystem)); 1800 ns_ptrs[0] = &ns; 1801 subsystem.ns = ns_ptrs; 1802 subsystem.max_nsid = 1; 1803 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1804 1805 memset(&ns, 0, sizeof(ns)); 1806 ns.opts.nsid = 1; 1807 ns.bdev = &bdev; 1808 1809 memset(&qpair, 0, sizeof(qpair)); 1810 qpair.ctrlr = &ctrlr; 1811 1812 memset(&ctrlr, 0, sizeof(ctrlr)); 1813 ctrlr.subsys = &subsystem; 1814 ctrlr.vcprop.cc.bits.en = 1; 1815 ctrlr.thread = spdk_get_thread(); 1816 1817 memset(&req, 0, sizeof(req)); 1818 req.qpair = &qpair; 1819 req.cmd = &cmd; 1820 req.rsp = &rsp; 1821 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 1822 req.data = buf; 1823 req.length = sizeof(buf); 1824 spdk_iov_one(req.iov, &req.iovcnt, &buf, req.length); 1825 1826 memset(&cmd, 0, sizeof(cmd)); 1827 cmd.nvme_cmd.opc = 0xc1; 1828 cmd.nvme_cmd.nsid = 0; 1829 memset(&rsp, 0, sizeof(rsp)); 1830 1831 spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr); 1832 1833 /* Ensure that our handler gets called */ 1834 rc = nvmf_ctrlr_process_admin_cmd(&req); 1835 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1836 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1837 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1838 } 1839 1840 static void 1841 test_fused_compare_and_write(void) 1842 { 1843 struct spdk_nvmf_request req = {}; 1844 struct spdk_nvmf_qpair qpair = {}; 1845 struct spdk_nvme_cmd cmd = {}; 1846 union nvmf_c2h_msg rsp = {}; 1847 struct
spdk_nvmf_ctrlr ctrlr = {}; 1848 struct spdk_nvmf_subsystem subsystem = {}; 1849 struct spdk_nvmf_ns ns = {}; 1850 struct spdk_nvmf_ns *subsys_ns[1] = {}; 1851 enum spdk_nvme_ana_state ana_state[1]; 1852 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 1853 struct spdk_bdev bdev = {}; 1854 1855 struct spdk_nvmf_poll_group group = {}; 1856 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1857 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 1858 struct spdk_io_channel io_ch = {}; 1859 1860 ns.bdev = &bdev; 1861 ns.anagrpid = 1; 1862 1863 subsystem.id = 0; 1864 subsystem.max_nsid = 1; 1865 subsys_ns[0] = &ns; 1866 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 1867 1868 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1869 1870 /* Enable controller */ 1871 ctrlr.vcprop.cc.bits.en = 1; 1872 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 1873 ctrlr.listener = &listener; 1874 1875 group.num_sgroups = 1; 1876 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1877 sgroups.num_ns = 1; 1878 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1879 ns_info.channel = &io_ch; 1880 sgroups.ns_info = &ns_info; 1881 TAILQ_INIT(&sgroups.queued); 1882 group.sgroups = &sgroups; 1883 TAILQ_INIT(&qpair.outstanding); 1884 1885 qpair.ctrlr = &ctrlr; 1886 qpair.group = &group; 1887 qpair.qid = 1; 1888 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1889 1890 cmd.nsid = 1; 1891 1892 req.qpair = &qpair; 1893 req.cmd = (union nvmf_h2c_msg *)&cmd; 1894 req.rsp = &rsp; 1895 1896 /* SUCCESS/SUCCESS */ 1897 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1898 cmd.opc = SPDK_NVME_OPC_COMPARE; 1899 1900 spdk_nvmf_request_exec(&req); 1901 CU_ASSERT(qpair.first_fused_req != NULL); 1902 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1903 1904 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1905 cmd.opc = SPDK_NVME_OPC_WRITE; 1906 1907 spdk_nvmf_request_exec(&req); 1908 CU_ASSERT(qpair.first_fused_req == NULL); 1909 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1910 1911 /* Wrong sequence */ 1912 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1913 cmd.opc = SPDK_NVME_OPC_WRITE; 1914 1915 spdk_nvmf_request_exec(&req); 1916 CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status)); 1917 CU_ASSERT(qpair.first_fused_req == NULL); 1918 1919 /* Write as FUSE_FIRST (Wrong op code) */ 1920 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1921 cmd.opc = SPDK_NVME_OPC_WRITE; 1922 1923 spdk_nvmf_request_exec(&req); 1924 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 1925 CU_ASSERT(qpair.first_fused_req == NULL); 1926 1927 /* Compare as FUSE_SECOND (Wrong op code) */ 1928 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1929 cmd.opc = SPDK_NVME_OPC_COMPARE; 1930 1931 spdk_nvmf_request_exec(&req); 1932 CU_ASSERT(qpair.first_fused_req != NULL); 1933 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1934 1935 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1936 cmd.opc = SPDK_NVME_OPC_COMPARE; 1937 1938 spdk_nvmf_request_exec(&req); 1939 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 1940 CU_ASSERT(qpair.first_fused_req == NULL); 1941 } 1942 1943 static void 1944 test_multi_async_event_reqs(void) 1945 { 1946 struct spdk_nvmf_subsystem subsystem = {}; 1947 struct spdk_nvmf_qpair qpair = {}; 1948 struct spdk_nvmf_ctrlr ctrlr = {}; 1949 struct spdk_nvmf_request req[5] = {}; 1950 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 1951 struct spdk_nvmf_ns ns = {}; 1952 union nvmf_h2c_msg cmd[5] = {}; 1953 union nvmf_c2h_msg rsp[5] = {}; 1954 1955 struct spdk_nvmf_poll_group group = {}; 1956 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 
1957 1958 int i; 1959 1960 ns_ptrs[0] = &ns; 1961 subsystem.ns = ns_ptrs; 1962 subsystem.max_nsid = 1; 1963 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1964 1965 ns.opts.nsid = 1; 1966 group.sgroups = &sgroups; 1967 1968 qpair.ctrlr = &ctrlr; 1969 qpair.group = &group; 1970 TAILQ_INIT(&qpair.outstanding); 1971 1972 ctrlr.subsys = &subsystem; 1973 ctrlr.vcprop.cc.bits.en = 1; 1974 ctrlr.thread = spdk_get_thread(); 1975 1976 for (i = 0; i < 5; i++) { 1977 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 1978 cmd[i].nvme_cmd.nsid = 1; 1979 cmd[i].nvme_cmd.cid = i; 1980 1981 req[i].qpair = &qpair; 1982 req[i].cmd = &cmd[i]; 1983 req[i].rsp = &rsp[i]; 1984 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 1985 } 1986 1987 /* The target can store up to SPDK_NVMF_MAX_ASYNC_EVENTS reqs */ 1988 sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS; 1989 for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) { 1990 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 1991 CU_ASSERT(ctrlr.nr_aer_reqs == i + 1); 1992 } 1993 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 1994 1995 /* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */ 1996 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1997 CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS); 1998 CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 1999 CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED); 2000 2001 /* Test that the aer_req array stays contiguous when a request in the middle is aborted */ 2002 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true); 2003 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 2004 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 2005 CU_ASSERT(ctrlr.aer_req[2] == &req[3]); 2006 2007 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true); 2008 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 2009 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 2010 CU_ASSERT(ctrlr.aer_req[2] == NULL); 2011 CU_ASSERT(ctrlr.nr_aer_reqs == 2); 2012 2013 TAILQ_REMOVE(&qpair.outstanding, &req[0], link); 2014 TAILQ_REMOVE(&qpair.outstanding, &req[1], link); 2015 } 2016 2017 static void 2018 test_get_ana_log_page_one_ns_per_anagrp(void) 2019 { 2020 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t)) 2021 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE) 2022 uint32_t ana_group[3]; 2023 struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group }; 2024 struct spdk_nvmf_ctrlr ctrlr = {}; 2025 enum spdk_nvme_ana_state ana_state[3]; 2026 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2027 struct spdk_nvmf_ns ns[3]; 2028 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]}; 2029 uint64_t offset; 2030 uint32_t length; 2031 int i; 2032 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2033 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2034 struct iovec iov, iovs[2]; 2035 struct spdk_nvme_ana_page *ana_hdr; 2036 char _ana_desc[UT_ANA_DESC_SIZE]; 2037 struct spdk_nvme_ana_group_descriptor *ana_desc; 2038 2039 subsystem.ns = ns_arr; 2040 subsystem.max_nsid = 3; 2041 for (i = 0; i < 3; i++) { 2042 subsystem.ana_group[i] = 1; 2043 } 2044 ctrlr.subsys = &subsystem; 2045 ctrlr.listener = &listener; 2046 2047 for (i = 0; i < 3; i++) { 2048 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2049 } 2050 2051 for (i = 0; i < 3; i++) { 2052 ns_arr[i]->nsid = i + 1; 2053 ns_arr[i]->anagrpid = i + 1; 2054 } 2055 2056 /* create expected page */ 2057
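/* Layout: a spdk_nvme_ana_page header followed by one spdk_nvme_ana_group_descriptor per ANA group, each trailed by its num_of_nsid NSIDs (one uint32_t each); here three groups with one NSID apiece. */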
ana_hdr = (void *)&expected_page[0]; 2058 ana_hdr->num_ana_group_desc = 3; 2059 ana_hdr->change_count = 0; 2060 2061 /* descriptor may be unaligned. So create data and then copy it to the location. */ 2062 ana_desc = (void *)_ana_desc; 2063 offset = sizeof(struct spdk_nvme_ana_page); 2064 2065 for (i = 0; i < 3; i++) { 2066 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 2067 ana_desc->ana_group_id = ns_arr[i]->nsid; 2068 ana_desc->num_of_nsid = 1; 2069 ana_desc->change_count = 0; 2070 ana_desc->ana_state = ctrlr.listener->ana_state[i]; 2071 ana_desc->nsid[0] = ns_arr[i]->nsid; 2072 memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE); 2073 offset += UT_ANA_DESC_SIZE; 2074 } 2075 2076 /* read entire actual log page */ 2077 offset = 0; 2078 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2079 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2080 iov.iov_base = &actual_page[offset]; 2081 iov.iov_len = length; 2082 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2083 offset += length; 2084 } 2085 2086 /* compare expected page and actual page */ 2087 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2088 2089 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2090 offset = 0; 2091 iovs[0].iov_base = &actual_page[offset]; 2092 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 2093 offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 2094 iovs[1].iov_base = &actual_page[offset]; 2095 iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset; 2096 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2097 2098 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2099 2100 #undef UT_ANA_DESC_SIZE 2101 #undef UT_ANA_LOG_PAGE_SIZE 2102 } 2103 2104 static void 2105 test_get_ana_log_page_multi_ns_per_anagrp(void) 2106 { 2107 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + \ 2108 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 + \ 2109 sizeof(uint32_t) * 5) 2110 struct spdk_nvmf_ns ns[5]; 2111 struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]}; 2112 uint32_t ana_group[5] = {0}; 2113 struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, }; 2114 enum spdk_nvme_ana_state ana_state[5]; 2115 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, }; 2116 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, }; 2117 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2118 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2119 struct iovec iov, iovs[2]; 2120 struct spdk_nvme_ana_page *ana_hdr; 2121 char _ana_desc[UT_ANA_LOG_PAGE_SIZE]; 2122 struct spdk_nvme_ana_group_descriptor *ana_desc; 2123 uint64_t offset; 2124 uint32_t length; 2125 int i; 2126 2127 subsystem.max_nsid = 5; 2128 subsystem.ana_group[1] = 3; 2129 subsystem.ana_group[2] = 2; 2130 for (i = 0; i < 5; i++) { 2131 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2132 } 2133 2134 for (i = 0; i < 5; i++) { 2135 ns_arr[i]->nsid = i + 1; 2136 } 2137 ns_arr[0]->anagrpid = 2; 2138 ns_arr[1]->anagrpid = 3; 2139 ns_arr[2]->anagrpid = 2; 2140 ns_arr[3]->anagrpid = 3; 2141 ns_arr[4]->anagrpid = 2; 2142 2143 /* create expected page */ 2144 ana_hdr = (void *)&expected_page[0]; 2145 ana_hdr->num_ana_group_desc = 2; 2146 ana_hdr->change_count = 0; 2147 2148 /* descriptor may be unaligned. So create data and then copy it to the location. 
*/ 2149 ana_desc = (void *)_ana_desc; 2150 offset = sizeof(struct spdk_nvme_ana_page); 2151 2152 memset(_ana_desc, 0, sizeof(_ana_desc)); 2153 ana_desc->ana_group_id = 2; 2154 ana_desc->num_of_nsid = 3; 2155 ana_desc->change_count = 0; 2156 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2157 ana_desc->nsid[0] = 1; 2158 ana_desc->nsid[1] = 3; 2159 ana_desc->nsid[2] = 5; 2160 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 2161 sizeof(uint32_t) * 3); 2162 offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3; 2163 2164 memset(_ana_desc, 0, sizeof(_ana_desc)); 2165 ana_desc->ana_group_id = 3; 2166 ana_desc->num_of_nsid = 2; 2167 ana_desc->change_count = 0; 2168 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2169 ana_desc->nsid[0] = 2; 2170 ana_desc->nsid[1] = 4; 2171 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 2172 sizeof(uint32_t) * 2); 2173 2174 /* read entire actual log page, and compare expected page and actual page. */ 2175 offset = 0; 2176 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2177 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2178 iov.iov_base = &actual_page[offset]; 2179 iov.iov_len = length; 2180 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2181 offset += length; 2182 } 2183 2184 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2185 2186 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2187 offset = 0; 2188 iovs[0].iov_base = &actual_page[offset]; 2189 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2190 offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2191 iovs[1].iov_base = &actual_page[offset]; 2192 iovs[1].iov_len = sizeof(uint32_t) * 5; 2193 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2194 2195 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2196 2197 #undef UT_ANA_LOG_PAGE_SIZE 2198 } 2199 static void 2200 test_multi_async_events(void) 2201 { 2202 struct spdk_nvmf_subsystem subsystem = {}; 2203 struct spdk_nvmf_qpair qpair = {}; 2204 struct spdk_nvmf_ctrlr ctrlr = {}; 2205 struct spdk_nvmf_request req[4] = {}; 2206 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2207 struct spdk_nvmf_ns ns = {}; 2208 union nvmf_h2c_msg cmd[4] = {}; 2209 union nvmf_c2h_msg rsp[4] = {}; 2210 union spdk_nvme_async_event_completion event = {}; 2211 struct spdk_nvmf_poll_group group = {}; 2212 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2213 int i; 2214 2215 ns_ptrs[0] = &ns; 2216 subsystem.ns = ns_ptrs; 2217 subsystem.max_nsid = 1; 2218 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2219 2220 ns.opts.nsid = 1; 2221 group.sgroups = &sgroups; 2222 2223 qpair.ctrlr = &ctrlr; 2224 qpair.group = &group; 2225 TAILQ_INIT(&qpair.outstanding); 2226 2227 ctrlr.subsys = &subsystem; 2228 ctrlr.vcprop.cc.bits.en = 1; 2229 ctrlr.thread = spdk_get_thread(); 2230 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2231 ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1; 2232 ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1; 2233 init_pending_async_events(&ctrlr); 2234 2235 /* The target queues pending events when there is no outstanding AER request */ 2236 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2237 nvmf_ctrlr_async_event_ana_change_notice(&ctrlr); 2238 nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr); 2239 2240 for (i = 0; i < 4; i++) { 2241 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2242
cmd[i].nvme_cmd.nsid = 1; 2243 cmd[i].nvme_cmd.cid = i; 2244 2245 req[i].qpair = &qpair; 2246 req[i].cmd = &cmd[i]; 2247 req[i].rsp = &rsp[i]; 2248 2249 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 2250 2251 sgroups.mgmt_io_outstanding = 1; 2252 if (i < 3) { 2253 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2254 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2255 CU_ASSERT(ctrlr.nr_aer_reqs == 0); 2256 } else { 2257 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 2258 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2259 CU_ASSERT(ctrlr.nr_aer_reqs == 1); 2260 } 2261 } 2262 2263 event.raw = rsp[0].nvme_cpl.cdw0; 2264 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2265 event.raw = rsp[1].nvme_cpl.cdw0; 2266 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE); 2267 event.raw = rsp[2].nvme_cpl.cdw0; 2268 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE); 2269 2270 cleanup_pending_async_events(&ctrlr); 2271 } 2272 2273 static void 2274 test_rae(void) 2275 { 2276 struct spdk_nvmf_subsystem subsystem = {}; 2277 struct spdk_nvmf_qpair qpair = {}; 2278 struct spdk_nvmf_ctrlr ctrlr = {}; 2279 struct spdk_nvmf_request req[3] = {}; 2280 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2281 struct spdk_nvmf_ns ns = {}; 2282 union nvmf_h2c_msg cmd[3] = {}; 2283 union nvmf_c2h_msg rsp[3] = {}; 2284 union spdk_nvme_async_event_completion event = {}; 2285 struct spdk_nvmf_poll_group group = {}; 2286 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2287 int i; 2288 char data[4096]; 2289 2290 ns_ptrs[0] = &ns; 2291 subsystem.ns = ns_ptrs; 2292 subsystem.max_nsid = 1; 2293 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2294 2295 ns.opts.nsid = 1; 2296 group.sgroups = &sgroups; 2297 2298 qpair.ctrlr = &ctrlr; 2299 qpair.group = &group; 2300 TAILQ_INIT(&qpair.outstanding); 2301 2302 ctrlr.subsys = &subsystem; 2303 ctrlr.vcprop.cc.bits.en = 1; 2304 ctrlr.thread = spdk_get_thread(); 2305 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2306 init_pending_async_events(&ctrlr); 2307 2308 /* The target queues pending events when there is no outstanding AER request */ 2309 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2310 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2311 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2312 /* Only one event is queued until RAE is cleared */ 2313 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2314 2315 req[0].qpair = &qpair; 2316 req[0].cmd = &cmd[0]; 2317 req[0].rsp = &rsp[0]; 2318 cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2319 cmd[0].nvme_cmd.nsid = 1; 2320 cmd[0].nvme_cmd.cid = 0; 2321 2322 for (i = 1; i < 3; i++) { 2323 req[i].qpair = &qpair; 2324 req[i].cmd = &cmd[i]; 2325 req[i].rsp = &rsp[i]; 2326 req[i].data = &data; 2327 req[i].length = sizeof(data); 2328 spdk_iov_one(req[i].iov, &req[i].iovcnt, &data, req[i].length); 2329 2330 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 2331 cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid = 2332 SPDK_NVME_LOG_CHANGED_NS_LIST; 2333 cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl = 2334 spdk_nvme_bytes_to_numd(req[i].length); 2335 cmd[i].nvme_cmd.cid = i; 2336 } 2337 cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1; 2338 cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0; 2339 2340 /* consume the pending event */ 2341 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link); 2342 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) ==
SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2343 event.raw = rsp[0].nvme_cpl.cdw0; 2344 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2345 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2346 2347 /* get log with RAE set */ 2348 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2349 CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2350 CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2351 2352 /* No new event is generated until RAE is cleared */ 2353 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2354 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2355 2356 /* get log with RAE clear */ 2357 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2358 CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2359 CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2360 2361 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2362 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2363 2364 cleanup_pending_async_events(&ctrlr); 2365 } 2366 2367 static void 2368 test_nvmf_ctrlr_create_destruct(void) 2369 { 2370 struct spdk_nvmf_fabric_connect_data connect_data = {}; 2371 struct spdk_nvmf_poll_group group = {}; 2372 struct spdk_nvmf_subsystem_poll_group sgroups[2] = {}; 2373 struct spdk_nvmf_transport transport = {}; 2374 struct spdk_nvmf_transport_ops tops = {}; 2375 struct spdk_nvmf_subsystem subsystem = {}; 2376 struct spdk_nvmf_request req = {}; 2377 struct spdk_nvmf_qpair qpair = {}; 2378 struct spdk_nvmf_ctrlr *ctrlr = NULL; 2379 struct spdk_nvmf_tgt tgt = {}; 2380 union nvmf_h2c_msg cmd = {}; 2381 union nvmf_c2h_msg rsp = {}; 2382 const uint8_t hostid[16] = { 2383 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2384 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 2385 }; 2386 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 2387 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 2388 2389 group.thread = spdk_get_thread(); 2390 transport.ops = &tops; 2391 transport.opts.max_aq_depth = 32; 2392 transport.opts.max_queue_depth = 64; 2393 transport.opts.max_qpairs_per_ctrlr = 3; 2394 transport.opts.dif_insert_or_strip = true; 2395 transport.tgt = &tgt; 2396 qpair.transport = &transport; 2397 qpair.group = &group; 2398 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2399 TAILQ_INIT(&qpair.outstanding); 2400 2401 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 2402 connect_data.cntlid = 0xFFFF; 2403 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 2404 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 2405 2406 subsystem.thread = spdk_get_thread(); 2407 subsystem.id = 1; 2408 TAILQ_INIT(&subsystem.ctrlrs); 2409 subsystem.tgt = &tgt; 2410 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2411 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2412 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 2413 2414 group.sgroups = sgroups; 2415 2416 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 2417 cmd.connect_cmd.cid = 1; 2418 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 2419 cmd.connect_cmd.recfmt = 0; 2420 cmd.connect_cmd.qid = 0; 2421 cmd.connect_cmd.sqsize = 31; 2422 cmd.connect_cmd.cattr = 0; 2423 cmd.connect_cmd.kato = 120000; 2424 2425 req.qpair = &qpair; 2426 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 2427 req.data = &connect_data; 2428 req.length = sizeof(connect_data); 2429 spdk_iov_one(req.iov, &req.iovcnt, &connect_data, req.length); 2430 req.cmd = &cmd; 2431 req.rsp = &rsp; 2432
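/* Mimic the transport: queue the CONNECT and account for it as an outstanding management I/O before creating the controller. */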
2433 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 2434 sgroups[subsystem.id].mgmt_io_outstanding++; 2435 2436 ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base); 2437 poll_threads(); 2438 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2439 CU_ASSERT(req.qpair->ctrlr == ctrlr); 2440 CU_ASSERT(ctrlr->subsys == &subsystem); 2441 CU_ASSERT(ctrlr->thread == req.qpair->group->thread); 2442 CU_ASSERT(ctrlr->disconnect_in_progress == false); 2443 CU_ASSERT(ctrlr->qpair_mask != NULL); 2444 CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000); 2445 CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1); 2446 CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1); 2447 CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1); 2448 CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1); 2449 CU_ASSERT(!memcmp((void *)&ctrlr->hostid, hostid, 16)); 2450 CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1); 2451 CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63); 2452 CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0); 2453 CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500); 2454 CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0); 2455 CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM); 2456 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0); 2457 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0); 2458 CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1); 2459 CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3); 2460 CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0); 2461 CU_ASSERT(ctrlr->vcprop.cc.raw == 0); 2462 CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0); 2463 CU_ASSERT(ctrlr->vcprop.csts.raw == 0); 2464 CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0); 2465 CU_ASSERT(ctrlr->dif_insert_or_strip == true); 2466 2467 ctrlr->in_destruct = true; 2468 nvmf_ctrlr_destruct(ctrlr); 2469 poll_threads(); 2470 CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs)); 2471 CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding)); 2472 } 2473 2474 static void 2475 test_nvmf_ctrlr_use_zcopy(void) 2476 { 2477 struct spdk_nvmf_subsystem subsystem = {}; 2478 struct spdk_nvmf_transport transport = {}; 2479 struct spdk_nvmf_request req = {}; 2480 struct spdk_nvmf_qpair qpair = {}; 2481 struct spdk_nvmf_ctrlr ctrlr = {}; 2482 union nvmf_h2c_msg cmd = {}; 2483 struct spdk_nvmf_ns ns = {}; 2484 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2485 struct spdk_bdev bdev = {}; 2486 struct spdk_nvmf_poll_group group = {}; 2487 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2488 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2489 struct spdk_io_channel io_ch = {}; 2490 int opc; 2491 2492 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2493 ns.bdev = &bdev; 2494 2495 subsystem.id = 0; 2496 subsystem.max_nsid = 1; 2497 subsys_ns[0] = &ns; 2498 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2499 2500 ctrlr.subsys = &subsystem; 2501 2502 transport.opts.zcopy = true; 2503 2504 qpair.ctrlr = &ctrlr; 2505 qpair.group = &group; 2506 qpair.qid = 1; 2507 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2508 qpair.transport = &transport; 2509 2510 group.thread = spdk_get_thread(); 2511 group.num_sgroups = 1; 2512 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2513 sgroups.num_ns = 1; 2514 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2515 ns_info.channel = &io_ch; 2516 sgroups.ns_info = &ns_info; 2517 TAILQ_INIT(&sgroups.queued); 2518 group.sgroups = &sgroups; 2519 TAILQ_INIT(&qpair.outstanding); 2520 2521 req.qpair = &qpair; 2522 req.cmd = &cmd; 2523 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2524 2525 /* Admin queue */ 2526 qpair.qid = 0; 2527
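/* zcopy is never used for commands on the admin queue */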
CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2528 qpair.qid = 1; 2529 2530 /* Invalid Opcodes */ 2531 for (opc = 0; opc <= 255; opc++) { 2532 cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc; 2533 if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) && 2534 (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) { 2535 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2536 } 2537 } 2538 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 2539 2540 /* Fused WRITE */ 2541 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2542 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2543 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE; 2544 2545 /* Non bdev */ 2546 cmd.nvme_cmd.nsid = 4; 2547 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2548 cmd.nvme_cmd.nsid = 1; 2549 2550 /* ZCOPY Not supported */ 2551 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2552 ns.zcopy = true; 2553 2554 /* ZCOPY disabled on transport level */ 2555 transport.opts.zcopy = false; 2556 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2557 transport.opts.zcopy = true; 2558 2559 /* Success */ 2560 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2561 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2562 } 2563 2564 static void 2565 qpair_state_change_done(void *cb_arg, int status) 2566 { 2567 } 2568 2569 static void 2570 test_spdk_nvmf_request_zcopy_start(void) 2571 { 2572 struct spdk_nvmf_request req = {}; 2573 struct spdk_nvmf_qpair qpair = {}; 2574 struct spdk_nvmf_transport transport = {}; 2575 struct spdk_nvme_cmd cmd = {}; 2576 union nvmf_c2h_msg rsp = {}; 2577 struct spdk_nvmf_ctrlr ctrlr = {}; 2578 struct spdk_nvmf_subsystem subsystem = {}; 2579 struct spdk_nvmf_ns ns = {}; 2580 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2581 enum spdk_nvme_ana_state ana_state[1]; 2582 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2583 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2584 2585 struct spdk_nvmf_poll_group group = {}; 2586 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2587 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2588 struct spdk_io_channel io_ch = {}; 2589 2590 ns.bdev = &bdev; 2591 ns.zcopy = true; 2592 ns.anagrpid = 1; 2593 2594 subsystem.id = 0; 2595 subsystem.max_nsid = 1; 2596 subsys_ns[0] = &ns; 2597 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2598 2599 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2600 2601 /* Enable controller */ 2602 ctrlr.vcprop.cc.bits.en = 1; 2603 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2604 ctrlr.listener = &listener; 2605 2606 transport.opts.zcopy = true; 2607 2608 group.thread = spdk_get_thread(); 2609 group.num_sgroups = 1; 2610 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2611 sgroups.num_ns = 1; 2612 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2613 ns_info.channel = &io_ch; 2614 sgroups.ns_info = &ns_info; 2615 TAILQ_INIT(&sgroups.queued); 2616 group.sgroups = &sgroups; 2617 TAILQ_INIT(&qpair.outstanding); 2618 2619 qpair.ctrlr = &ctrlr; 2620 qpair.group = &group; 2621 qpair.transport = &transport; 2622 qpair.qid = 1; 2623 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2624 2625 cmd.nsid = 1; 2626 2627 req.qpair = &qpair; 2628 req.cmd = (union nvmf_h2c_msg *)&cmd; 2629 req.rsp = &rsp; 2630 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2631 cmd.opc = SPDK_NVME_OPC_READ; 2632 2633 /* Fail because no controller */ 2634 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2635 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2636 qpair.ctrlr = NULL; 2637 spdk_nvmf_request_zcopy_start(&req); 2638 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2639 
CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2640 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 2641 qpair.ctrlr = &ctrlr; 2642 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2643 2644 /* Fail because bad NSID */ 2645 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2646 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2647 cmd.nsid = 0; 2648 spdk_nvmf_request_zcopy_start(&req); 2649 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2650 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2651 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2652 cmd.nsid = 1; 2653 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2654 2655 /* Fail because bad Channel */ 2656 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2657 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2658 ns_info.channel = NULL; 2659 spdk_nvmf_request_zcopy_start(&req); 2660 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2661 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2662 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2663 ns_info.channel = &io_ch; 2664 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2665 2666 /* Queue the request because the NSID is not active */ 2667 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2668 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2669 ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING; 2670 spdk_nvmf_request_zcopy_start(&req); 2671 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT); 2672 CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req); 2673 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2674 TAILQ_REMOVE(&sgroups.queued, &req, link); 2675 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2676 2677 /* Fail because QPair is not active */ 2678 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2679 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2680 qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING; 2681 qpair.state_cb = qpair_state_change_done; 2682 spdk_nvmf_request_zcopy_start(&req); 2683 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED); 2684 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2685 qpair.state_cb = NULL; 2686 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2687 2688 /* Fail because nvmf_bdev_ctrlr_zcopy_start fails */ 2689 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2690 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2691 cmd.cdw10 = bdev.blockcnt; /* SLBA: CDW10 and CDW11 */ 2692 cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */ 2693 req.length = (cmd.cdw12 + 1) * bdev.blocklen; 2694 spdk_nvmf_request_zcopy_start(&req); 2695 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2696 cmd.cdw10 = 0; 2697 cmd.cdw12 = 0; 2698 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2699 2700 /* Success */ 2701 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2702 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2703 spdk_nvmf_request_zcopy_start(&req); 2704 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2705 } 2706 2707 static void 2708 test_zcopy_read(void) 2709 { 2710 struct spdk_nvmf_request req = {}; 2711 struct spdk_nvmf_qpair qpair = {}; 2712 struct spdk_nvmf_transport transport = {}; 2713 struct spdk_nvme_cmd cmd = {}; 2714 union nvmf_c2h_msg rsp = {}; 2715 struct spdk_nvmf_ctrlr ctrlr = {}; 2716 struct spdk_nvmf_subsystem subsystem = {}; 2717 struct spdk_nvmf_ns ns = {}; 2718 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2719 enum spdk_nvme_ana_state ana_state[1]; 2720 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2721 struct
spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2722 2723 struct spdk_nvmf_poll_group group = {}; 2724 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2725 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2726 struct spdk_io_channel io_ch = {}; 2727 2728 ns.bdev = &bdev; 2729 ns.zcopy = true; 2730 ns.anagrpid = 1; 2731 2732 subsystem.id = 0; 2733 subsystem.max_nsid = 1; 2734 subsys_ns[0] = &ns; 2735 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2736 2737 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2738 2739 /* Enable controller */ 2740 ctrlr.vcprop.cc.bits.en = 1; 2741 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2742 ctrlr.listener = &listener; 2743 2744 transport.opts.zcopy = true; 2745 2746 group.thread = spdk_get_thread(); 2747 group.num_sgroups = 1; 2748 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2749 sgroups.num_ns = 1; 2750 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2751 ns_info.channel = &io_ch; 2752 sgroups.ns_info = &ns_info; 2753 TAILQ_INIT(&sgroups.queued); 2754 group.sgroups = &sgroups; 2755 TAILQ_INIT(&qpair.outstanding); 2756 2757 qpair.ctrlr = &ctrlr; 2758 qpair.group = &group; 2759 qpair.transport = &transport; 2760 qpair.qid = 1; 2761 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2762 2763 cmd.nsid = 1; 2764 2765 req.qpair = &qpair; 2766 req.cmd = (union nvmf_h2c_msg *)&cmd; 2767 req.rsp = &rsp; 2768 cmd.opc = SPDK_NVME_OPC_READ; 2769 2770 /* Prepare for zcopy */ 2771 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2772 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2773 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2774 CU_ASSERT(ns_info.io_outstanding == 0); 2775 2776 /* Perform the zcopy start */ 2777 spdk_nvmf_request_zcopy_start(&req); 2778 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2779 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read); 2780 CU_ASSERT(qpair.outstanding.tqh_first == &req); 2781 CU_ASSERT(ns_info.io_outstanding == 1); 2782 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2783 2784 /* Perform the zcopy end */ 2785 spdk_nvmf_request_zcopy_end(&req, false); 2786 CU_ASSERT(req.zcopy_bdev_io == NULL); 2787 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 2788 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2789 CU_ASSERT(ns_info.io_outstanding == 0); 2790 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2791 } 2792 2793 static void 2794 test_zcopy_write(void) 2795 { 2796 struct spdk_nvmf_request req = {}; 2797 struct spdk_nvmf_qpair qpair = {}; 2798 struct spdk_nvmf_transport transport = {}; 2799 struct spdk_nvme_cmd cmd = {}; 2800 union nvmf_c2h_msg rsp = {}; 2801 struct spdk_nvmf_ctrlr ctrlr = {}; 2802 struct spdk_nvmf_subsystem subsystem = {}; 2803 struct spdk_nvmf_ns ns = {}; 2804 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2805 enum spdk_nvme_ana_state ana_state[1]; 2806 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2807 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2808 2809 struct spdk_nvmf_poll_group group = {}; 2810 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2811 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2812 struct spdk_io_channel io_ch = {}; 2813 2814 ns.bdev = &bdev; 2815 ns.zcopy = true; 2816 ns.anagrpid = 1; 2817 2818 subsystem.id = 0; 2819 subsystem.max_nsid = 1; 2820 subsys_ns[0] = &ns; 2821 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2822 2823 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2824 2825 /* Enable controller */ 2826 ctrlr.vcprop.cc.bits.en = 1; 2827 ctrlr.subsys = (struct 
spdk_nvmf_subsystem *)&subsystem; 2828 ctrlr.listener = &listener; 2829 2830 transport.opts.zcopy = true; 2831 2832 group.thread = spdk_get_thread(); 2833 group.num_sgroups = 1; 2834 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2835 sgroups.num_ns = 1; 2836 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2837 ns_info.channel = &io_ch; 2838 sgroups.ns_info = &ns_info; 2839 TAILQ_INIT(&sgroups.queued); 2840 group.sgroups = &sgroups; 2841 TAILQ_INIT(&qpair.outstanding); 2842 2843 qpair.ctrlr = &ctrlr; 2844 qpair.group = &group; 2845 qpair.transport = &transport; 2846 qpair.qid = 1; 2847 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2848 2849 cmd.nsid = 1; 2850 2851 req.qpair = &qpair; 2852 req.cmd = (union nvmf_h2c_msg *)&cmd; 2853 req.rsp = &rsp; 2854 cmd.opc = SPDK_NVME_OPC_WRITE; 2855 2856 /* Prepare for zcopy */ 2857 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2858 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2859 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2860 CU_ASSERT(ns_info.io_outstanding == 0); 2861 2862 /* Perform the zcopy start */ 2863 spdk_nvmf_request_zcopy_start(&req); 2864 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2865 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write); 2866 CU_ASSERT(qpair.outstanding.tqh_first == &req); 2867 CU_ASSERT(ns_info.io_outstanding == 1); 2868 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2869 2870 /* Perform the zcopy end */ 2871 spdk_nvmf_request_zcopy_end(&req, true); 2872 CU_ASSERT(req.zcopy_bdev_io == NULL); 2873 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 2874 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2875 CU_ASSERT(ns_info.io_outstanding == 0); 2876 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2877 } 2878 2879 static void 2880 test_nvmf_property_set(void) 2881 { 2882 int rc; 2883 struct spdk_nvmf_request req = {}; 2884 struct spdk_nvmf_qpair qpair = {}; 2885 struct spdk_nvmf_ctrlr ctrlr = {}; 2886 union nvmf_h2c_msg cmd = {}; 2887 union nvmf_c2h_msg rsp = {}; 2888 2889 req.qpair = &qpair; 2890 qpair.ctrlr = &ctrlr; 2891 req.cmd = &cmd; 2892 req.rsp = &rsp; 2893 2894 /* Invalid parameters */ 2895 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 2896 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs); 2897 2898 rc = nvmf_property_set(&req); 2899 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2900 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 2901 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 2902 2903 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms); 2904 2905 rc = nvmf_property_get(&req); 2906 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2907 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 2908 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 2909 2910 /* Set cc with same property size */ 2911 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 2912 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc); 2913 2914 rc = nvmf_property_set(&req); 2915 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2916 2917 /* Emulate cc data */ 2918 ctrlr.vcprop.cc.raw = 0xDEADBEEF; 2919 2920 rc = nvmf_property_get(&req); 2921 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2922 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF); 2923 2924 /* Set asq with different property size */ 2925 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 2926 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 2927 cmd.prop_set_cmd.ofst = 
offsetof(struct spdk_nvme_registers, asq); 2928 2929 rc = nvmf_property_set(&req); 2930 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2931 2932 /* Emulate asq data */ 2933 ctrlr.vcprop.asq = 0xAADDADBEEF; 2934 2935 rc = nvmf_property_get(&req); 2936 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2937 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF); 2938 } 2939 2940 static void 2941 test_nvmf_ctrlr_get_features_host_behavior_support(void) 2942 { 2943 int rc; 2944 struct spdk_nvmf_request req = {}; 2945 struct spdk_nvmf_qpair qpair = {}; 2946 struct spdk_nvmf_ctrlr ctrlr = {}; 2947 struct spdk_nvme_host_behavior behavior = {}; 2948 union nvmf_h2c_msg cmd = {}; 2949 union nvmf_c2h_msg rsp = {}; 2950 2951 qpair.ctrlr = &ctrlr; 2952 req.qpair = &qpair; 2953 req.cmd = &cmd; 2954 req.rsp = &rsp; 2955 2956 /* Invalid data */ 2957 req.data = NULL; 2958 req.length = sizeof(struct spdk_nvme_host_behavior); 2959 req.iovcnt = 0; 2960 2961 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 2962 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2963 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2964 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 2965 CU_ASSERT(req.data == NULL); 2966 2967 /* Wrong structure length */ 2968 req.data = &behavior; 2969 req.length = sizeof(struct spdk_nvme_host_behavior) - 1; 2970 spdk_iov_one(req.iov, &req.iovcnt, &behavior, req.length); 2971 2972 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 2973 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2974 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2975 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 2976 2977 /* Get Features Host Behavior Support Success */ 2978 req.data = &behavior; 2979 req.length = sizeof(struct spdk_nvme_host_behavior); 2980 spdk_iov_one(req.iov, &req.iovcnt, &behavior, req.length); 2981 2982 ctrlr.acre_enabled = true; 2983 behavior.acre = false; 2984 2985 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 2986 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2987 CU_ASSERT(behavior.acre == true); 2988 } 2989 2990 static void 2991 test_nvmf_ctrlr_set_features_host_behavior_support(void) 2992 { 2993 int rc; 2994 struct spdk_nvmf_request req = {}; 2995 struct spdk_nvmf_qpair qpair = {}; 2996 struct spdk_nvmf_ctrlr ctrlr = {}; 2997 struct spdk_nvme_host_behavior host_behavior = {}; 2998 union nvmf_h2c_msg cmd = {}; 2999 union nvmf_c2h_msg rsp = {}; 3000 3001 qpair.ctrlr = &ctrlr; 3002 req.qpair = &qpair; 3003 req.cmd = &cmd; 3004 req.rsp = &rsp; 3005 req.iov[0].iov_base = &host_behavior; 3006 req.iov[0].iov_len = sizeof(host_behavior); 3007 3008 /* Invalid iovcnt */ 3009 req.iovcnt = 0; 3010 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3011 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3012 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3013 3014 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3015 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3016 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3017 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3018 3019 /* Invalid iov_len */ 3020 req.iovcnt = 1; 3021 req.iov[0].iov_len = 0; 3022 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3023 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3024 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3025 3026 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3027 
CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3028 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3029 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3030 3031 /* acre is false */ 3032 host_behavior.acre = 0; 3033 req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior); 3034 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3035 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3036 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3037 3038 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3039 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3040 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3041 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 3042 CU_ASSERT(ctrlr.acre_enabled == false); 3043 3044 /* acre is true */ 3045 host_behavior.acre = 1; 3046 req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior); 3047 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3048 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3049 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3050 3051 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3052 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3053 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3054 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 3055 CU_ASSERT(ctrlr.acre_enabled == true); 3056 3057 /* Invalid acre */ 3058 host_behavior.acre = 2; 3059 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3060 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3061 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3062 3063 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3064 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3065 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3066 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3067 } 3068 3069 int 3070 main(int argc, char **argv) 3071 { 3072 CU_pSuite suite = NULL; 3073 unsigned int num_failures; 3074 3075 CU_initialize_registry(); 3076 3077 suite = CU_add_suite("nvmf", NULL, NULL); 3078 CU_ADD_TEST(suite, test_get_log_page); 3079 CU_ADD_TEST(suite, test_process_fabrics_cmd); 3080 CU_ADD_TEST(suite, test_connect); 3081 CU_ADD_TEST(suite, test_get_ns_id_desc_list); 3082 CU_ADD_TEST(suite, test_identify_ns); 3083 CU_ADD_TEST(suite, test_identify_ns_iocs_specific); 3084 CU_ADD_TEST(suite, test_reservation_write_exclusive); 3085 CU_ADD_TEST(suite, test_reservation_exclusive_access); 3086 CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs); 3087 CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs); 3088 CU_ADD_TEST(suite, test_reservation_notification_log_page); 3089 CU_ADD_TEST(suite, test_get_dif_ctx); 3090 CU_ADD_TEST(suite, test_set_get_features); 3091 CU_ADD_TEST(suite, test_identify_ctrlr); 3092 CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific); 3093 CU_ADD_TEST(suite, test_custom_admin_cmd); 3094 CU_ADD_TEST(suite, test_fused_compare_and_write); 3095 CU_ADD_TEST(suite, test_multi_async_event_reqs); 3096 CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp); 3097 CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp); 3098 CU_ADD_TEST(suite, test_multi_async_events); 3099 CU_ADD_TEST(suite, test_rae); 3100 CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct); 3101 CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy); 3102 CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start); 3103 CU_ADD_TEST(suite, test_zcopy_read); 
3104 CU_ADD_TEST(suite, test_zcopy_write); 3105 CU_ADD_TEST(suite, test_nvmf_property_set); 3106 CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support); 3107 CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support); 3108 3109 allocate_threads(1); 3110 set_thread(0); 3111 3112 num_failures = spdk_ut_run_tests(argc, argv, NULL); 3113 CU_cleanup_registry(); 3114 3115 free_threads(); 3116 3117 return num_failures; 3118 } 3119