/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/bdev_zone.h"
#include "spdk/nvme_spec.h"
#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
	bool zoned;
	uint32_t zone_size;
	uint32_t max_open_zones;
	uint32_t max_active_zones;
};

#define MAX_OPEN_ZONES		12
#define MAX_ACTIVE_ZONES	34
#define ZONE_SIZE		56

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

/* Sentinel bdev_io pointers: never dereferenced, only compared against
 * req->zcopy_bdev_io to tell which path nvmf_bdev_ctrlr_zcopy_start() took.
 */
static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;
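/* Most dependencies are satisfied with stubs from spdk_internal/mock.h:
 * DEFINE_STUB(fn, ret, args, val) emits an fn() that returns 'val' until a
 * test overrides the return value with MOCK_SET(fn, other_val), and
 * MOCK_CLEAR(fn) marks the mock as no longer overridden. DEFINE_STUB_V is
 * the void-return variant.
 */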
DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
	    MAX_ACTIVE_ZONES);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
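/* The functions below are implemented by hand rather than stubbed, because
 * the tests depend on their real side effects (namespace iteration, the
 * zcopy sentinel pointers defined above, etc.).
 */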
int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	/* NLB in CDW12[15:0] is 0-based, hence the + 1. */
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

bool
nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
{
	return ns->ptpl_file != NULL;
}
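/* Note on the NUMDL values used below: per the NVMe spec, Get Log Page
 * encodes the transfer size as a 0-based dword count split across NUMDL
 * (CDW10[31:16]) and NUMDU (CDW11[15:0]); spdk_nvme_bytes_to_numd()
 * converts a byte length to that 0-based dword count (length / 4 - 1).
 */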
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.length = sizeof(data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &data, req.length);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.iovcnt = 0;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}
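/* For the Connect failure cases below, the controller reports the offending
 * field via the status-code-specific "invalid parameter" fields: IATTR
 * selects whether IPO is a byte offset into the SQE (0) or into the data
 * buffer (1). Hence ipo == 42/44 point at QID/SQSIZE in the Connect command,
 * while ipo == 16/256/512 point at CNTLID/SUBNQN/HOSTNQN in the Connect data.
 */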
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_ns *ns_arr[1] = { NULL };
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
	subsystem.ns = ns_arr;
	subsystem.max_nsid = 1;

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.length = sizeof(connect_data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);
	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;
	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* Admin (qid == 0) connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	/* Admin (qid == 0) connect to discovery controller with keep-alive-timeout == 0.
	 * A fixed default value is then applied as the keep-alive timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, false);

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;
	/* I/O connect with qid that is too large */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect duplicate QID and schedule a retry. So for
	 * now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now advance the clock, so that the retry poller executes. */
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with temporarily duplicate queue ID. This covers race
	 * where qpair_mask bit may not yet be cleared, even though initiator
	 * has closed the connection. See issue #2955.
	 */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect duplicate QID and schedule a retry. So for
	 * now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now clear the stale qpair_mask bit and advance the clock, so that the
	 * retry poller executes and this time succeeds.
	 */
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* I/O connect when admin qpair was destroyed */
	ctrlr.admin_qpair = NULL;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.admin_qpair = &admin_qpair;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}
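/* The buffer offsets asserted in the next test follow the NVMe Namespace
 * Identification Descriptor layout: each descriptor is a 4-byte header
 * (NIDT, NIDL, two reserved bytes) followed by NIDL bytes of ID, so an
 * EUI64 descriptor occupies bytes 0-11 and the next descriptor starts at
 * offset 12.
 */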
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();
	ctrlr.visible_ns = spdk_bit_array_create(1);

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.length = sizeof(buf);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns is inactive */
	spdk_bit_array_clear(ctrlr.visible_ns, 0);
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);
	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);

	spdk_bit_array_free(&ctrlr.visible_ns);
}

static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem,
		.admin_qpair = &admin_qpair,
	};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	ctrlr.visible_ns = spdk_bit_array_create(3);
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	spdk_bit_array_set(ctrlr.visible_ns, 2);

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 1 */
	spdk_bit_array_clear(ctrlr.visible_ns, 0);
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid but unallocated NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
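/* In the ZNS Identify Namespace data checked in the next test, MAR and MOR
 * are 0-based values per the ZNS spec, which is why the bdev limits appear
 * as MAX_ACTIVE_ZONES - 1 and MAX_OPEN_ZONES - 1, and ZSZE is expressed in
 * logical blocks.
 */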
	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	spdk_bit_array_free(&ctrlr.visible_ns);
}

static void
test_identify_ns_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ns_data nsdata = {};
	struct spdk_bdev bdev[2] = {
		{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE,
		 .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES},
		{.blockcnt = 5678}
	};
	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};

	ctrlr.visible_ns = spdk_bit_array_create(3);
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	spdk_bit_array_set(ctrlr.visible_ns, 1);
	spdk_bit_array_set(ctrlr.visible_ns, 2);
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* Invalid ZNS NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid ZNS NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1);
	CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1);
	CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1);
	CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE);
	nsdata.ozcs.read_across_zone_boundaries = 0;
	nsdata.mar = 0;
	nsdata.mor = 0;
	nsdata.lbafe[0].zsze = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* Valid NVM NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NVM NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	spdk_bit_array_free(&ctrlr.visible_ns);
}
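/* Temperature Threshold (feature 04h) packs TMPTH into CDW11[15:0], TMPSEL
 * into CDW11[19:16] and THSEL into CDW11[21:20]; the "reserved value" cases
 * in the next test poke bits that select an undefined sensor or threshold
 * type.
 */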
static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem,
		.admin_qpair = &admin_qpair,
		.listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ctrlr.visible_ns = spdk_bit_array_create(3);
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	spdk_bit_array_set(ctrlr.visible_ns, 2);
	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	spdk_bit_array_free(&ctrlr.visible_ns);
}

/*
 * Reservation Unit Test Configuration
 *
 *      --------             --------    --------
 *     | Host A |           | Host B |  | Host C |
 *      --------             --------    --------
 *     /        \               |           |
 *  --------  --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|    |Ctrlr_B|   |Ctrlr_C|
 *  --------  --------       -------     -------
 *    \          \              /           /
 *     \          \            /           /
 *      \          \          /           /
 *      --------------------------------------
 *     |             NAMESPACE 1              |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}
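/* In the reservation cases below, nvmf_ns_reservation_request_check()
 * returns 0 when the command is allowed and a negative value when it must
 * be failed with SPDK_NVME_SC_RESERVATION_CONFLICT.
 */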
static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}
nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1472 SPDK_CU_ASSERT_FATAL(rc == 0); 1473 } 1474 1475 static void 1476 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1477 { 1478 struct spdk_nvmf_request req = {}; 1479 union nvmf_h2c_msg cmd = {}; 1480 union nvmf_c2h_msg rsp = {}; 1481 int rc; 1482 1483 req.cmd = &cmd; 1484 req.rsp = &rsp; 1485 1486 /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */ 1487 ut_reservation_init(rtype); 1488 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1489 1490 /* Test Case: Issue a Read command from Host A and Host C */ 1491 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1492 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1493 SPDK_CU_ASSERT_FATAL(rc == 0); 1494 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1495 SPDK_CU_ASSERT_FATAL(rc == 0); 1496 1497 /* Test Case: Issue a DSM Write command from Host A and Host C */ 1498 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1499 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1500 SPDK_CU_ASSERT_FATAL(rc == 0); 1501 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1502 SPDK_CU_ASSERT_FATAL(rc == 0); 1503 1504 /* Unregister Host C */ 1505 spdk_uuid_set_null(&g_ns_info.reg_hostid[2]); 1506 1507 /* Test Case: Read and Write commands from non-registrant Host C */ 1508 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1509 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1510 SPDK_CU_ASSERT_FATAL(rc == 0); 1511 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1512 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1513 SPDK_CU_ASSERT_FATAL(rc < 0); 1514 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1515 } 1516 1517 static void 1518 test_reservation_write_exclusive_regs_only_and_all_regs(void) 1519 { 1520 _test_reservation_write_exclusive_regs_only_and_all_regs( 1521 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1522 _test_reservation_write_exclusive_regs_only_and_all_regs( 1523 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS); 1524 } 1525 1526 static void 1527 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1528 { 1529 struct spdk_nvmf_request req = {}; 1530 union nvmf_h2c_msg cmd = {}; 1531 union nvmf_c2h_msg rsp = {}; 1532 int rc; 1533 1534 req.cmd = &cmd; 1535 req.rsp = &rsp; 1536 1537 /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */ 1538 ut_reservation_init(rtype); 1539 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1540 1541 /* Test Case: Issue a Write command from Host B */ 1542 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1543 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1544 SPDK_CU_ASSERT_FATAL(rc == 0); 1545 1546 /* Unregister Host B */ 1547 spdk_uuid_set_null(&g_ns_info.reg_hostid[1]); 1548 1549 /* Test Case: Issue a Read command from Host B */ 1550 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1551 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1552 SPDK_CU_ASSERT_FATAL(rc < 0); 1553 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1554 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1555 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1556 SPDK_CU_ASSERT_FATAL(rc < 0); 1557 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1558 } 1559 1560 static void 1561 
test_reservation_exclusive_access_regs_only_and_all_regs(void) 1562 { 1563 _test_reservation_exclusive_access_regs_only_and_all_regs( 1564 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY); 1565 _test_reservation_exclusive_access_regs_only_and_all_regs( 1566 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 1567 } 1568 1569 static void 1570 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1571 { 1572 STAILQ_INIT(&ctrlr->async_events); 1573 } 1574 1575 static void 1576 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1577 { 1578 struct spdk_nvmf_async_event_completion *event, *event_tmp; 1579 1580 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) { 1581 STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link); 1582 free(event); 1583 } 1584 } 1585 1586 static int 1587 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1588 { 1589 int num = 0; 1590 struct spdk_nvmf_async_event_completion *event; 1591 1592 STAILQ_FOREACH(event, &ctrlr->async_events, link) { 1593 num++; 1594 } 1595 return num; 1596 } 1597 1598 static void 1599 test_reservation_notification_log_page(void) 1600 { 1601 struct spdk_nvmf_ctrlr ctrlr; 1602 struct spdk_nvmf_qpair qpair; 1603 struct spdk_nvmf_ns ns; 1604 struct spdk_nvmf_request req = {}; 1605 union nvmf_h2c_msg cmd = {}; 1606 union nvmf_c2h_msg rsp = {}; 1607 union spdk_nvme_async_event_completion event = {}; 1608 struct spdk_nvme_reservation_notification_log logs[3]; 1609 struct iovec iov; 1610 1611 memset(&ctrlr, 0, sizeof(ctrlr)); 1612 ctrlr.thread = spdk_get_thread(); 1613 TAILQ_INIT(&ctrlr.log_head); 1614 init_pending_async_events(&ctrlr); 1615 ns.nsid = 1; 1616 1617 /* Test Case: Mask all the reservation notifications */ 1618 ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK | 1619 SPDK_NVME_RESERVATION_RELEASED_MASK | 1620 SPDK_NVME_RESERVATION_PREEMPTED_MASK; 1621 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1622 SPDK_NVME_REGISTRATION_PREEMPTED); 1623 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1624 SPDK_NVME_RESERVATION_RELEASED); 1625 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1626 SPDK_NVME_RESERVATION_PREEMPTED); 1627 poll_threads(); 1628 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head)); 1629 1630 /* Test Case: Unmask all the reservation notifications, 1631 * 3 log pages are generated, and AER was triggered. 
1632 */ 1633 ns.mask = 0; 1634 ctrlr.num_avail_log_pages = 0; 1635 req.cmd = &cmd; 1636 req.rsp = &rsp; 1637 ctrlr.aer_req[0] = &req; 1638 ctrlr.nr_aer_reqs = 1; 1639 req.qpair = &qpair; 1640 TAILQ_INIT(&qpair.outstanding); 1641 qpair.ctrlr = NULL; 1642 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1643 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 1644 1645 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1646 SPDK_NVME_REGISTRATION_PREEMPTED); 1647 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1648 SPDK_NVME_RESERVATION_RELEASED); 1649 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1650 SPDK_NVME_RESERVATION_PREEMPTED); 1651 poll_threads(); 1652 event.raw = rsp.nvme_cpl.cdw0; 1653 SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO); 1654 SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL); 1655 SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION); 1656 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3); 1657 1658 /* Test Case: Get Log Page to clear the log pages */ 1659 iov.iov_base = &logs[0]; 1660 iov.iov_len = sizeof(logs); 1661 nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0); 1662 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0); 1663 1664 cleanup_pending_async_events(&ctrlr); 1665 } 1666 1667 static void 1668 test_get_dif_ctx(void) 1669 { 1670 struct spdk_nvmf_subsystem subsystem = {}; 1671 struct spdk_nvmf_request req = {}; 1672 struct spdk_nvmf_qpair qpair = {}; 1673 struct spdk_nvmf_ctrlr ctrlr = {}; 1674 struct spdk_nvmf_ns ns = {}; 1675 struct spdk_nvmf_ns *_ns = NULL; 1676 struct spdk_bdev bdev = {}; 1677 union nvmf_h2c_msg cmd = {}; 1678 struct spdk_dif_ctx dif_ctx = {}; 1679 bool ret; 1680 1681 ctrlr.subsys = &subsystem; 1682 ctrlr.visible_ns = spdk_bit_array_create(1); 1683 spdk_bit_array_set(ctrlr.visible_ns, 0); 1684 1685 qpair.ctrlr = &ctrlr; 1686 1687 req.qpair = &qpair; 1688 req.cmd = &cmd; 1689 1690 ns.bdev = &bdev; 1691 1692 ctrlr.dif_insert_or_strip = false; 1693 1694 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1695 CU_ASSERT(ret == false); 1696 1697 ctrlr.dif_insert_or_strip = true; 1698 qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED; 1699 1700 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1701 CU_ASSERT(ret == false); 1702 1703 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1704 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 1705 1706 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1707 CU_ASSERT(ret == false); 1708 1709 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH; 1710 1711 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1712 CU_ASSERT(ret == false); 1713 1714 qpair.qid = 1; 1715 1716 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1717 CU_ASSERT(ret == false); 1718 1719 cmd.nvme_cmd.nsid = 1; 1720 1721 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1722 CU_ASSERT(ret == false); 1723 1724 subsystem.max_nsid = 1; 1725 subsystem.ns = &_ns; 1726 subsystem.ns[0] = &ns; 1727 1728 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1729 CU_ASSERT(ret == false); 1730 1731 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE; 1732 1733 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1734 CU_ASSERT(ret == true); 1735 1736 spdk_bit_array_free(&ctrlr.visible_ns); 1737 } 1738 1739 static void 1740 test_identify_ctrlr(void) 1741 { 1742 struct spdk_nvmf_tgt tgt = {}; 1743 struct spdk_nvmf_subsystem subsystem = { 1744 .subtype = SPDK_NVMF_SUBTYPE_NVME, 1745 .tgt = &tgt, 1746 }; 1747 struct spdk_nvmf_transport_ops 
tops = {}; 1748 struct spdk_nvmf_transport transport = { 1749 .ops = &tops, 1750 .opts = { 1751 .in_capsule_data_size = 4096, 1752 }, 1753 }; 1754 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 1755 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 1756 struct spdk_nvme_ctrlr_data cdata = {}; 1757 uint32_t expected_ioccsz; 1758 1759 nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata); 1760 1761 /* Check ioccsz, TCP transport */ 1762 tops.type = SPDK_NVME_TRANSPORT_TCP; 1763 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1764 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1765 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1766 1767 /* Check ioccsz, RDMA transport */ 1768 tops.type = SPDK_NVME_TRANSPORT_RDMA; 1769 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1770 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1771 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1772 1773 /* Check ioccsz, TCP transport with dif_insert_or_strip */ 1774 tops.type = SPDK_NVME_TRANSPORT_TCP; 1775 ctrlr.dif_insert_or_strip = true; 1776 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1777 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1778 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1779 } 1780 1781 static void 1782 test_identify_ctrlr_iocs_specific(void) 1783 { 1784 struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 }; 1785 struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 }; 1786 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop }; 1787 struct spdk_nvme_cmd cmd = {}; 1788 struct spdk_nvme_cpl rsp = {}; 1789 struct spdk_nvme_zns_ctrlr_data ctrlr_data = {}; 1790 struct spdk_nvme_nvm_ctrlr_data cdata_nvm = {}; 1791 1792 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS; 1793 1794 /* ZNS max_zone_append_size_kib no limit */ 1795 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1796 memset(&rsp, 0, sizeof(rsp)); 1797 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1798 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1799 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1800 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1801 CU_ASSERT(ctrlr_data.zasl == 0); 1802 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1803 1804 /* ZNS max_zone_append_size_kib = 4096 */ 1805 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1806 memset(&rsp, 0, sizeof(rsp)); 1807 subsystem.max_zone_append_size_kib = 4096; 1808 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1809 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1810 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1811 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1812 CU_ASSERT(ctrlr_data.zasl == 0); 1813 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1814 1815 /* ZNS max_zone_append_size_kib = 60000 */ 1816 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1817 memset(&rsp, 0, sizeof(rsp)); 1818 subsystem.max_zone_append_size_kib = 60000; 1819 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1820 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1821 
CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1822 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1823 CU_ASSERT(ctrlr_data.zasl == 3); 1824 ctrlr_data.zasl = 0; 1825 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1826 1827 /* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */ 1828 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1829 memset(&rsp, 0, sizeof(rsp)); 1830 ctrlr.vcprop.cap.bits.mpsmin = 2; 1831 subsystem.max_zone_append_size_kib = 60000; 1832 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1833 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1834 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1835 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1836 CU_ASSERT(ctrlr_data.zasl == 1); 1837 ctrlr_data.zasl = 0; 1838 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1839 ctrlr.vcprop.cap.bits.mpsmin = 0; 1840 1841 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM; 1842 1843 /* NVM max_discard_size_kib = 1024; 1844 * max_write_zeroes_size_kib = 1024; 1845 * mpsmin = 0; 1846 */ 1847 memset(&cdata_nvm, 0xFF, sizeof(cdata_nvm)); 1848 memset(&rsp, 0, sizeof(rsp)); 1849 subsystem.max_discard_size_kib = (uint64_t)1024; 1850 subsystem.max_write_zeroes_size_kib = (uint64_t)1024; 1851 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1852 &cdata_nvm, sizeof(cdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1853 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1854 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1855 CU_ASSERT(cdata_nvm.wzsl == 8); 1856 CU_ASSERT(cdata_nvm.dmrsl == 2048); 1857 CU_ASSERT(cdata_nvm.dmrl == 1); 1858 } 1859 1860 static int 1861 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req) 1862 { 1863 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 1864 1865 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1866 }; 1867 1868 static void 1869 test_custom_admin_cmd(void) 1870 { 1871 struct spdk_nvmf_subsystem subsystem; 1872 struct spdk_nvmf_qpair qpair; 1873 struct spdk_nvmf_ctrlr ctrlr; 1874 struct spdk_nvmf_request req; 1875 struct spdk_nvmf_ns *ns_ptrs[1]; 1876 struct spdk_nvmf_ns ns; 1877 union nvmf_h2c_msg cmd; 1878 union nvmf_c2h_msg rsp; 1879 struct spdk_bdev bdev; 1880 uint8_t buf[4096]; 1881 int rc; 1882 1883 memset(&subsystem, 0, sizeof(subsystem)); 1884 ns_ptrs[0] = &ns; 1885 subsystem.ns = ns_ptrs; 1886 subsystem.max_nsid = 1; 1887 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1888 1889 memset(&ns, 0, sizeof(ns)); 1890 ns.opts.nsid = 1; 1891 ns.bdev = &bdev; 1892 1893 memset(&qpair, 0, sizeof(qpair)); 1894 qpair.ctrlr = &ctrlr; 1895 1896 memset(&ctrlr, 0, sizeof(ctrlr)); 1897 ctrlr.subsys = &subsystem; 1898 ctrlr.vcprop.cc.bits.en = 1; 1899 ctrlr.thread = spdk_get_thread(); 1900 1901 memset(&req, 0, sizeof(req)); 1902 req.qpair = &qpair; 1903 req.cmd = &cmd; 1904 req.rsp = &rsp; 1905 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 1906 req.length = sizeof(buf); 1907 SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length); 1908 1909 memset(&cmd, 0, sizeof(cmd)); 1910 cmd.nvme_cmd.opc = 0xc1; 1911 cmd.nvme_cmd.nsid = 0; 1912 memset(&rsp, 0, sizeof(rsp)); 1913 1914 spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr); 1915 1916 /* Ensure that our hdlr is being called */ 1917 rc = nvmf_ctrlr_process_admin_cmd(&req); 1918 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1919 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1920 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1921 } 1922 1923 static 
void 1924 test_fused_compare_and_write(void) 1925 { 1926 struct spdk_nvmf_request req = {}; 1927 struct spdk_nvmf_qpair qpair = {}; 1928 struct spdk_nvme_cmd cmd = {}; 1929 union nvmf_c2h_msg rsp = {}; 1930 struct spdk_nvmf_ctrlr ctrlr = {}; 1931 struct spdk_nvmf_subsystem subsystem = {}; 1932 struct spdk_nvmf_ns ns = {}; 1933 struct spdk_nvmf_ns *subsys_ns[1] = {}; 1934 enum spdk_nvme_ana_state ana_state[1]; 1935 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 1936 struct spdk_bdev bdev = {}; 1937 1938 struct spdk_nvmf_poll_group group = {}; 1939 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1940 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 1941 struct spdk_io_channel io_ch = {}; 1942 1943 ns.bdev = &bdev; 1944 ns.anagrpid = 1; 1945 1946 subsystem.id = 0; 1947 subsystem.max_nsid = 1; 1948 subsys_ns[0] = &ns; 1949 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 1950 1951 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1952 1953 /* Enable controller */ 1954 ctrlr.vcprop.cc.bits.en = 1; 1955 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 1956 ctrlr.listener = &listener; 1957 ctrlr.visible_ns = spdk_bit_array_create(1); 1958 spdk_bit_array_set(ctrlr.visible_ns, 0); 1959 1960 group.num_sgroups = 1; 1961 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1962 sgroups.num_ns = 1; 1963 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1964 ns_info.channel = &io_ch; 1965 sgroups.ns_info = &ns_info; 1966 TAILQ_INIT(&sgroups.queued); 1967 group.sgroups = &sgroups; 1968 TAILQ_INIT(&qpair.outstanding); 1969 1970 qpair.ctrlr = &ctrlr; 1971 qpair.group = &group; 1972 qpair.qid = 1; 1973 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 1974 1975 cmd.nsid = 1; 1976 1977 req.qpair = &qpair; 1978 req.cmd = (union nvmf_h2c_msg *)&cmd; 1979 req.rsp = &rsp; 1980 1981 /* SUCCESS/SUCCESS */ 1982 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 1983 cmd.opc = SPDK_NVME_OPC_COMPARE; 1984 1985 spdk_nvmf_request_exec(&req); 1986 CU_ASSERT(qpair.first_fused_req != NULL); 1987 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1988 1989 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1990 cmd.opc = SPDK_NVME_OPC_WRITE; 1991 1992 spdk_nvmf_request_exec(&req); 1993 CU_ASSERT(qpair.first_fused_req == NULL); 1994 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 1995 1996 /* Wrong sequence */ 1997 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 1998 cmd.opc = SPDK_NVME_OPC_WRITE; 1999 2000 spdk_nvmf_request_exec(&req); 2001 CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status)); 2002 CU_ASSERT(qpair.first_fused_req == NULL); 2003 2004 /* Write as FUSE_FIRST (Wrong op code) */ 2005 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2006 cmd.opc = SPDK_NVME_OPC_WRITE; 2007 2008 spdk_nvmf_request_exec(&req); 2009 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 2010 CU_ASSERT(qpair.first_fused_req == NULL); 2011 2012 /* Compare as FUSE_SECOND (Wrong op code) */ 2013 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2014 cmd.opc = SPDK_NVME_OPC_COMPARE; 2015 2016 spdk_nvmf_request_exec(&req); 2017 CU_ASSERT(qpair.first_fused_req != NULL); 2018 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2019 2020 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2021 cmd.opc = SPDK_NVME_OPC_COMPARE; 2022 2023 spdk_nvmf_request_exec(&req); 2024 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 2025 CU_ASSERT(qpair.first_fused_req == NULL); 2026 2027 spdk_bit_array_free(&ctrlr.visible_ns); 2028 } 2029 2030 static void 2031 test_multi_async_event_reqs(void) 2032 { 2033 struct spdk_nvmf_subsystem subsystem = {}; 2034 struct 
spdk_nvmf_qpair qpair = {}; 2035 struct spdk_nvmf_ctrlr ctrlr = {}; 2036 struct spdk_nvmf_request req[5] = {}; 2037 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2038 struct spdk_nvmf_ns ns = {}; 2039 union nvmf_h2c_msg cmd[5] = {}; 2040 union nvmf_c2h_msg rsp[5] = {}; 2041 2042 struct spdk_nvmf_poll_group group = {}; 2043 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2044 2045 int i; 2046 2047 ns_ptrs[0] = &ns; 2048 subsystem.ns = ns_ptrs; 2049 subsystem.max_nsid = 1; 2050 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2051 2052 ns.opts.nsid = 1; 2053 group.sgroups = &sgroups; 2054 2055 qpair.ctrlr = &ctrlr; 2056 qpair.group = &group; 2057 TAILQ_INIT(&qpair.outstanding); 2058 2059 ctrlr.subsys = &subsystem; 2060 ctrlr.vcprop.cc.bits.en = 1; 2061 ctrlr.thread = spdk_get_thread(); 2062 2063 for (i = 0; i < 5; i++) { 2064 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2065 cmd[i].nvme_cmd.nsid = 1; 2066 cmd[i].nvme_cmd.cid = i; 2067 2068 req[i].qpair = &qpair; 2069 req[i].cmd = &cmd[i]; 2070 req[i].rsp = &rsp[i]; 2071 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 2072 } 2073 2074 /* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */ 2075 sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS; 2076 for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) { 2077 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 2078 CU_ASSERT(ctrlr.nr_aer_reqs == i + 1); 2079 } 2080 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2081 2082 /* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */ 2083 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2084 CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS); 2085 CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 2086 CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED); 2087 2088 /* Test that the aer_reqs array stays contiguous when a request in the middle is aborted */ 2089 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true); 2090 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 2091 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 2092 CU_ASSERT(ctrlr.aer_req[2] == &req[3]); 2093 2094 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true); 2095 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 2096 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 2097 CU_ASSERT(ctrlr.aer_req[2] == NULL); 2098 CU_ASSERT(ctrlr.nr_aer_reqs == 2); 2099 2100 TAILQ_REMOVE(&qpair.outstanding, &req[0], link); 2101 TAILQ_REMOVE(&qpair.outstanding, &req[1], link); 2102 } 2103 2104 static void 2105 test_get_ana_log_page_one_ns_per_anagrp(void) 2106 { 2107 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t)) 2108 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE) 2109 uint32_t ana_group[3]; 2110 struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group }; 2111 struct spdk_nvmf_ctrlr ctrlr = {}; 2112 enum spdk_nvme_ana_state ana_state[3]; 2113 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2114 struct spdk_nvmf_ns ns[3]; 2115 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]}; 2116 uint64_t offset; 2117 uint32_t length; 2118 int i; 2119 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2120 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2121 struct iovec iov, iovs[2]; 2122 struct spdk_nvme_ana_page *ana_hdr; 2123 char _ana_desc[UT_ANA_DESC_SIZE]; 2124 struct spdk_nvme_ana_group_descriptor *ana_desc; 2125 2126 subsystem.ns = ns_arr; 2127 subsystem.max_nsid = 3; 2128 for (i = 0; i
< 3; i++) { 2129 subsystem.ana_group[i] = 1; 2130 } 2131 ctrlr.subsys = &subsystem; 2132 ctrlr.listener = &listener; 2133 2134 for (i = 0; i < 3; i++) { 2135 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2136 } 2137 2138 for (i = 0; i < 3; i++) { 2139 ns_arr[i]->nsid = i + 1; 2140 ns_arr[i]->anagrpid = i + 1; 2141 } 2142 2143 /* create expected page */ 2144 ana_hdr = (void *)&expected_page[0]; 2145 ana_hdr->num_ana_group_desc = 3; 2146 ana_hdr->change_count = 0; 2147 2148 /* descriptor may be unaligned. So create data and then copy it to the location. */ 2149 ana_desc = (void *)_ana_desc; 2150 offset = sizeof(struct spdk_nvme_ana_page); 2151 2152 for (i = 0; i < 3; i++) { 2153 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 2154 ana_desc->ana_group_id = ns_arr[i]->nsid; 2155 ana_desc->num_of_nsid = 1; 2156 ana_desc->change_count = 0; 2157 ana_desc->ana_state = ctrlr.listener->ana_state[i]; 2158 ana_desc->nsid[0] = ns_arr[i]->nsid; 2159 memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE); 2160 offset += UT_ANA_DESC_SIZE; 2161 } 2162 2163 /* read entire actual log page */ 2164 offset = 0; 2165 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2166 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2167 iov.iov_base = &actual_page[offset]; 2168 iov.iov_len = length; 2169 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2170 offset += length; 2171 } 2172 2173 /* compare expected page and actual page */ 2174 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2175 2176 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2177 offset = 0; 2178 iovs[0].iov_base = &actual_page[offset]; 2179 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 2180 offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 2181 iovs[1].iov_base = &actual_page[offset]; 2182 iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset; 2183 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2184 2185 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2186 2187 #undef UT_ANA_DESC_SIZE 2188 #undef UT_ANA_LOG_PAGE_SIZE 2189 } 2190 2191 static void 2192 test_get_ana_log_page_multi_ns_per_anagrp(void) 2193 { 2194 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + \ 2195 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 + \ 2196 sizeof(uint32_t) * 5) 2197 struct spdk_nvmf_ns ns[5]; 2198 struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]}; 2199 uint32_t ana_group[5] = {0}; 2200 struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, }; 2201 enum spdk_nvme_ana_state ana_state[5]; 2202 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, }; 2203 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, }; 2204 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2205 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2206 struct iovec iov, iovs[2]; 2207 struct spdk_nvme_ana_page *ana_hdr; 2208 char _ana_desc[UT_ANA_LOG_PAGE_SIZE]; 2209 struct spdk_nvme_ana_group_descriptor *ana_desc; 2210 uint64_t offset; 2211 uint32_t length; 2212 int i; 2213 2214 subsystem.max_nsid = 5; 2215 subsystem.ana_group[1] = 3; 2216 subsystem.ana_group[2] = 2; 2217 for (i = 0; i < 5; i++) { 2218 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2219 } 2220 2221 for (i = 0; i < 5; i++) { 2222 ns_arr[i]->nsid = i + 1; 2223 } 2224 ns_arr[0]->anagrpid = 2; 2225 ns_arr[1]->anagrpid = 3; 2226 ns_arr[2]->anagrpid = 2; 2227 ns_arr[3]->anagrpid = 3; 2228 ns_arr[4]->anagrpid = 2; 2229 2230 /* create 
expected page */ 2231 ana_hdr = (void *)&expected_page[0]; 2232 ana_hdr->num_ana_group_desc = 2; 2233 ana_hdr->change_count = 0; 2234 2235 /* descriptor may be unaligned. So create data and then copy it to the location. */ 2236 ana_desc = (void *)_ana_desc; 2237 offset = sizeof(struct spdk_nvme_ana_page); 2238 2239 memset(_ana_desc, 0, sizeof(_ana_desc)); 2240 ana_desc->ana_group_id = 2; 2241 ana_desc->num_of_nsid = 3; 2242 ana_desc->change_count = 0; 2243 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2244 ana_desc->nsid[0] = 1; 2245 ana_desc->nsid[1] = 3; 2246 ana_desc->nsid[2] = 5; 2247 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 2248 sizeof(uint32_t) * 3); 2249 offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3; 2250 2251 memset(_ana_desc, 0, sizeof(_ana_desc)); 2252 ana_desc->ana_group_id = 3; 2253 ana_desc->num_of_nsid = 2; 2254 ana_desc->change_count = 0; 2255 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2256 ana_desc->nsid[0] = 2; 2257 ana_desc->nsid[1] = 4; 2258 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 2259 sizeof(uint32_t) * 2); 2260 2261 /* read entire actual log page, and compare expected page and actual page. */ 2262 offset = 0; 2263 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2264 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2265 iov.iov_base = &actual_page[offset]; 2266 iov.iov_len = length; 2267 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2268 offset += length; 2269 } 2270 2271 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2272 2273 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2274 offset = 0; 2275 iovs[0].iov_base = &actual_page[offset]; 2276 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2277 offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2278 iovs[1].iov_base = &actual_page[offset]; 2279 iovs[1].iov_len = sizeof(uint32_t) * 5; 2280 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2281 2282 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2283 2284 #undef UT_ANA_LOG_PAGE_SIZE 2285 } 2286 static void 2287 test_multi_async_events(void) 2288 { 2289 struct spdk_nvmf_subsystem subsystem = {}; 2290 struct spdk_nvmf_qpair qpair = {}; 2291 struct spdk_nvmf_ctrlr ctrlr = {}; 2292 struct spdk_nvmf_request req[4] = {}; 2293 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2294 struct spdk_nvmf_ns ns = {}; 2295 union nvmf_h2c_msg cmd[4] = {}; 2296 union nvmf_c2h_msg rsp[4] = {}; 2297 union spdk_nvme_async_event_completion event = {}; 2298 struct spdk_nvmf_poll_group group = {}; 2299 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2300 int i; 2301 2302 ns_ptrs[0] = &ns; 2303 subsystem.ns = ns_ptrs; 2304 subsystem.max_nsid = 1; 2305 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2306 2307 ns.opts.nsid = 1; 2308 group.sgroups = &sgroups; 2309 2310 qpair.ctrlr = &ctrlr; 2311 qpair.group = &group; 2312 TAILQ_INIT(&qpair.outstanding); 2313 2314 ctrlr.subsys = &subsystem; 2315 ctrlr.vcprop.cc.bits.en = 1; 2316 ctrlr.thread = spdk_get_thread(); 2317 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2318 ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1; 2319 ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1; 2320 init_pending_async_events(&ctrlr); 2321 2322 /* Target queue pending events when there is no outstanding AER request */ 2323 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2324 
nvmf_ctrlr_async_event_ana_change_notice(&ctrlr); 2325 nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr); 2326 2327 for (i = 0; i < 4; i++) { 2328 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2329 cmd[i].nvme_cmd.nsid = 1; 2330 cmd[i].nvme_cmd.cid = i; 2331 2332 req[i].qpair = &qpair; 2333 req[i].cmd = &cmd[i]; 2334 req[i].rsp = &rsp[i]; 2335 2336 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 2337 2338 sgroups.mgmt_io_outstanding = 1; 2339 if (i < 3) { 2340 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2341 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2342 CU_ASSERT(ctrlr.nr_aer_reqs == 0); 2343 } else { 2344 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 2345 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2346 CU_ASSERT(ctrlr.nr_aer_reqs == 1); 2347 } 2348 } 2349 2350 event.raw = rsp[0].nvme_cpl.cdw0; 2351 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2352 event.raw = rsp[1].nvme_cpl.cdw0; 2353 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE); 2354 event.raw = rsp[2].nvme_cpl.cdw0; 2355 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE); 2356 2357 cleanup_pending_async_events(&ctrlr); 2358 } 2359 2360 static void 2361 test_rae(void) 2362 { 2363 struct spdk_nvmf_subsystem subsystem = {}; 2364 struct spdk_nvmf_qpair qpair = {}; 2365 struct spdk_nvmf_ctrlr ctrlr = {}; 2366 struct spdk_nvmf_request req[3] = {}; 2367 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2368 struct spdk_nvmf_ns ns = {}; 2369 union nvmf_h2c_msg cmd[3] = {}; 2370 union nvmf_c2h_msg rsp[3] = {}; 2371 union spdk_nvme_async_event_completion event = {}; 2372 struct spdk_nvmf_poll_group group = {}; 2373 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2374 int i; 2375 char data[4096]; 2376 2377 ns_ptrs[0] = &ns; 2378 subsystem.ns = ns_ptrs; 2379 subsystem.max_nsid = 1; 2380 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2381 2382 ns.opts.nsid = 1; 2383 group.sgroups = &sgroups; 2384 2385 qpair.ctrlr = &ctrlr; 2386 qpair.group = &group; 2387 TAILQ_INIT(&qpair.outstanding); 2388 2389 ctrlr.subsys = &subsystem; 2390 ctrlr.vcprop.cc.bits.en = 1; 2391 ctrlr.thread = spdk_get_thread(); 2392 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2393 init_pending_async_events(&ctrlr); 2394 2395 /* Target queue pending events when there is no outstanding AER request */ 2396 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2397 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2398 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2399 /* only one event will be queued before RAE is clear */ 2400 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2401 2402 req[0].qpair = &qpair; 2403 req[0].cmd = &cmd[0]; 2404 req[0].rsp = &rsp[0]; 2405 cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2406 cmd[0].nvme_cmd.nsid = 1; 2407 cmd[0].nvme_cmd.cid = 0; 2408 2409 for (i = 1; i < 3; i++) { 2410 req[i].qpair = &qpair; 2411 req[i].cmd = &cmd[i]; 2412 req[i].rsp = &rsp[i]; 2413 req[i].length = sizeof(data); 2414 SPDK_IOV_ONE(req[i].iov, &req[i].iovcnt, &data, req[i].length); 2415 2416 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 2417 cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid = 2418 SPDK_NVME_LOG_CHANGED_NS_LIST; 2419 cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl = 2420 spdk_nvme_bytes_to_numd(req[i].length); 2421 cmd[i].nvme_cmd.cid = i; 2422 } 2423 cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1; 2424 
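/* cmd[1] issues Get Log Page with RAE set, so the asynchronous event
 * condition is retained; cmd[2] issues it with RAE clear, which clears the
 * condition and re-arms namespace-change event reporting (checked below).
 */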
cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0; 2425 2426 /* consume the pending event */ 2427 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link); 2428 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2429 event.raw = rsp[0].nvme_cpl.cdw0; 2430 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2431 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2432 2433 /* get log with RAE set */ 2434 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2435 CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2436 CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2437 2438 /* will not generate new event until RAE is clear */ 2439 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2440 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2441 2442 /* get log with RAE clear */ 2443 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2444 CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2445 CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2446 2447 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2448 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2449 2450 cleanup_pending_async_events(&ctrlr); 2451 } 2452 2453 static void 2454 test_nvmf_ctrlr_create_destruct(void) 2455 { 2456 struct spdk_nvmf_fabric_connect_data connect_data = {}; 2457 struct spdk_nvmf_poll_group group = {}; 2458 struct spdk_nvmf_subsystem_poll_group sgroups[2] = {}; 2459 struct spdk_nvmf_transport transport = {}; 2460 struct spdk_nvmf_transport_ops tops = {}; 2461 struct spdk_nvmf_subsystem subsystem = {}; 2462 struct spdk_nvmf_ns *ns_arr[1] = { NULL }; 2463 struct spdk_nvmf_request req = {}; 2464 struct spdk_nvmf_qpair qpair = {}; 2465 struct spdk_nvmf_ctrlr *ctrlr = NULL; 2466 struct spdk_nvmf_tgt tgt = {}; 2467 union nvmf_h2c_msg cmd = {}; 2468 union nvmf_c2h_msg rsp = {}; 2469 const uint8_t hostid[16] = { 2470 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2471 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 2472 }; 2473 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 2474 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 2475 2476 group.thread = spdk_get_thread(); 2477 transport.ops = &tops; 2478 transport.opts.max_aq_depth = 32; 2479 transport.opts.max_queue_depth = 64; 2480 transport.opts.max_qpairs_per_ctrlr = 3; 2481 transport.opts.dif_insert_or_strip = true; 2482 transport.tgt = &tgt; 2483 qpair.transport = &transport; 2484 qpair.group = &group; 2485 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2486 TAILQ_INIT(&qpair.outstanding); 2487 2488 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 2489 connect_data.cntlid = 0xFFFF; 2490 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 2491 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 2492 2493 subsystem.thread = spdk_get_thread(); 2494 subsystem.id = 1; 2495 TAILQ_INIT(&subsystem.ctrlrs); 2496 subsystem.tgt = &tgt; 2497 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2498 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2499 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 2500 subsystem.ns = ns_arr; 2501 2502 group.sgroups = sgroups; 2503 2504 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 2505 cmd.connect_cmd.cid = 1; 2506 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 2507 cmd.connect_cmd.recfmt = 0; 2508 cmd.connect_cmd.qid = 0; 2509 cmd.connect_cmd.sqsize = 31; 2510 cmd.connect_cmd.cattr = 0; 2511 
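/* sqsize is 0's based (31 means a queue depth of 32, matching max_aq_depth
 * above); kato is given in milliseconds and is checked against
 * feat.keep_alive_timer.bits.kato once the controller has been created.
 */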
cmd.connect_cmd.kato = 120000; 2512 2513 req.qpair = &qpair; 2514 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 2515 req.length = sizeof(connect_data); 2516 SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length); 2517 req.cmd = &cmd; 2518 req.rsp = &rsp; 2519 2520 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 2521 sgroups[subsystem.id].mgmt_io_outstanding++; 2522 2523 ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base); 2524 poll_threads(); 2525 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2526 CU_ASSERT(req.qpair->ctrlr == ctrlr); 2527 CU_ASSERT(ctrlr->subsys == &subsystem); 2528 CU_ASSERT(ctrlr->thread == req.qpair->group->thread); 2529 CU_ASSERT(ctrlr->disconnect_in_progress == false); 2530 CU_ASSERT(ctrlr->qpair_mask != NULL); 2531 CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000); 2532 CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1); 2533 CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1); 2534 CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1); 2535 CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1); 2536 CU_ASSERT(!strncmp((void *)&ctrlr->hostid, hostid, 16)); 2537 CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1); 2538 CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63); 2539 CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0); 2540 CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500); 2541 CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0); 2542 CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM); 2543 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0); 2544 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0); 2545 CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1); 2546 CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3); 2547 CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0); 2548 CU_ASSERT(ctrlr->vcprop.cc.raw == 0); 2549 CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0); 2550 CU_ASSERT(ctrlr->vcprop.csts.raw == 0); 2551 CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0); 2552 CU_ASSERT(ctrlr->dif_insert_or_strip == true); 2553 2554 ctrlr->in_destruct = true; 2555 nvmf_ctrlr_destruct(ctrlr); 2556 poll_threads(); 2557 CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs)); 2558 CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding)); 2559 } 2560 2561 static void 2562 test_nvmf_ctrlr_use_zcopy(void) 2563 { 2564 struct spdk_nvmf_subsystem subsystem = {}; 2565 struct spdk_nvmf_transport transport = {}; 2566 struct spdk_nvmf_request req = {}; 2567 struct spdk_nvmf_qpair qpair = {}; 2568 struct spdk_nvmf_ctrlr ctrlr = {}; 2569 union nvmf_h2c_msg cmd = {}; 2570 struct spdk_nvmf_ns ns = {}; 2571 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2572 struct spdk_bdev bdev = {}; 2573 struct spdk_nvmf_poll_group group = {}; 2574 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2575 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2576 struct spdk_io_channel io_ch = {}; 2577 int opc; 2578 2579 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2580 ns.bdev = &bdev; 2581 2582 subsystem.id = 0; 2583 subsystem.max_nsid = 1; 2584 subsys_ns[0] = &ns; 2585 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2586 2587 ctrlr.subsys = &subsystem; 2588 ctrlr.visible_ns = spdk_bit_array_create(1); 2589 spdk_bit_array_set(ctrlr.visible_ns, 0); 2590 2591 transport.opts.zcopy = true; 2592 2593 qpair.ctrlr = &ctrlr; 2594 qpair.group = &group; 2595 qpair.qid = 1; 2596 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2597 qpair.transport = &transport; 2598 2599 group.thread = spdk_get_thread(); 2600 group.num_sgroups = 1; 2601 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2602 sgroups.num_ns = 1; 2603 
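/* Wire a single active namespace with a live I/O channel into the poll
 * group so that nsid 1 resolves to ns_info in the checks below.
 */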
ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2604 ns_info.channel = &io_ch; 2605 sgroups.ns_info = &ns_info; 2606 TAILQ_INIT(&sgroups.queued); 2607 group.sgroups = &sgroups; 2608 TAILQ_INIT(&qpair.outstanding); 2609 2610 req.qpair = &qpair; 2611 req.cmd = &cmd; 2612 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2613 2614 /* Admin queue */ 2615 qpair.qid = 0; 2616 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2617 qpair.qid = 1; 2618 2619 /* Invalid Opcodes */ 2620 for (opc = 0; opc <= 255; opc++) { 2621 cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc; 2622 if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) && 2623 (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) { 2624 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2625 } 2626 } 2627 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 2628 2629 /* Fused WRITE */ 2630 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2631 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2632 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE; 2633 2634 /* Non bdev */ 2635 cmd.nvme_cmd.nsid = 4; 2636 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2637 cmd.nvme_cmd.nsid = 1; 2638 2639 /* ZCOPY Not supported */ 2640 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2641 ns.zcopy = true; 2642 2643 /* ZCOPY disabled on transport level */ 2644 transport.opts.zcopy = false; 2645 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2646 transport.opts.zcopy = true; 2647 2648 /* Success */ 2649 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2650 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2651 2652 spdk_bit_array_free(&ctrlr.visible_ns); 2653 } 2654 2655 static void 2656 qpair_state_change_done(void *cb_arg, int status) 2657 { 2658 } 2659 2660 static void 2661 test_spdk_nvmf_request_zcopy_start(void) 2662 { 2663 struct spdk_nvmf_request req = {}; 2664 struct spdk_nvmf_qpair qpair = {}; 2665 struct spdk_nvmf_transport transport = {}; 2666 struct spdk_nvme_cmd cmd = {}; 2667 union nvmf_c2h_msg rsp = {}; 2668 struct spdk_nvmf_ctrlr ctrlr = {}; 2669 struct spdk_nvmf_subsystem subsystem = {}; 2670 struct spdk_nvmf_ns ns = {}; 2671 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2672 enum spdk_nvme_ana_state ana_state[1]; 2673 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2674 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2675 2676 struct spdk_nvmf_poll_group group = {}; 2677 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2678 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2679 struct spdk_io_channel io_ch = {}; 2680 2681 ns.bdev = &bdev; 2682 ns.zcopy = true; 2683 ns.anagrpid = 1; 2684 2685 subsystem.id = 0; 2686 subsystem.max_nsid = 1; 2687 subsys_ns[0] = &ns; 2688 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2689 2690 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2691 2692 /* Enable controller */ 2693 ctrlr.vcprop.cc.bits.en = 1; 2694 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2695 ctrlr.listener = &listener; 2696 ctrlr.visible_ns = spdk_bit_array_create(1); 2697 spdk_bit_array_set(ctrlr.visible_ns, 0); 2698 2699 transport.opts.zcopy = true; 2700 2701 group.thread = spdk_get_thread(); 2702 group.num_sgroups = 1; 2703 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2704 sgroups.num_ns = 1; 2705 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2706 ns_info.channel = &io_ch; 2707 sgroups.ns_info = &ns_info; 2708 TAILQ_INIT(&sgroups.queued); 2709 group.sgroups = &sgroups; 2710 TAILQ_INIT(&qpair.outstanding); 2711 2712 qpair.ctrlr = &ctrlr; 2713 qpair.group = &group; 2714 qpair.transport = &transport; 2715 qpair.qid = 1; 2716 qpair.state = 
SPDK_NVMF_QPAIR_ACTIVE; 2717 2718 cmd.nsid = 1; 2719 2720 req.qpair = &qpair; 2721 req.cmd = (union nvmf_h2c_msg *)&cmd; 2722 req.rsp = &rsp; 2723 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2724 cmd.opc = SPDK_NVME_OPC_READ; 2725 2726 /* Fail because no controller */ 2727 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2728 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2729 qpair.ctrlr = NULL; 2730 spdk_nvmf_request_zcopy_start(&req); 2731 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2732 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2733 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 2734 qpair.ctrlr = &ctrlr; 2735 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2736 2737 /* Fail because bad NSID */ 2738 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2739 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2740 cmd.nsid = 0; 2741 spdk_nvmf_request_zcopy_start(&req); 2742 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2743 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2744 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2745 cmd.nsid = 1; 2746 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2747 2748 /* Fail because bad channel */ 2749 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2750 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2751 ns_info.channel = NULL; 2752 spdk_nvmf_request_zcopy_start(&req); 2753 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2754 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2755 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2756 ns_info.channel = &io_ch; 2757 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2758 2759 /* Queue the request because the NSID is not active */ 2760 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2761 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2762 ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING; 2763 spdk_nvmf_request_zcopy_start(&req); 2764 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT); 2765 CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req); 2766 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2767 TAILQ_REMOVE(&sgroups.queued, &req, link); 2768 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2769 2770 /* Fail because QPair is not active */ 2771 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2772 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2773 qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING; 2774 qpair.state_cb = qpair_state_change_done; 2775 spdk_nvmf_request_zcopy_start(&req); 2776 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED); 2777 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2778 qpair.state_cb = NULL; 2779 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2780 2781 /* Fail because nvmf_bdev_ctrlr_zcopy_start fails */ 2782 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2783 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2784 cmd.cdw10 = bdev.blockcnt; /* SLBA: CDW10 and CDW11 */ 2785 cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */ 2786 req.length = (cmd.cdw12 + 1) * bdev.blocklen; 2787 spdk_nvmf_request_zcopy_start(&req); 2788 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2789 cmd.cdw10 = 0; 2790 cmd.cdw12 = 0; 2791 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2792 2793 /* Success */ 2794 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2795 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2796 spdk_nvmf_request_zcopy_start(&req); 2797 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2798 2799 spdk_bit_array_free(&ctrlr.visible_ns); 2800 } 2801
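/* The two tests below walk a request through the full zcopy life cycle:
 * NVMF_ZCOPY_PHASE_NONE -> INIT (nvmf_ctrlr_use_zcopy) -> EXECUTE
 * (spdk_nvmf_request_zcopy_start) -> COMPLETE (spdk_nvmf_request_zcopy_end).
 * The bdev layer is stubbed, so zcopy_bdev_io is expected to hold the
 * zcopy_start_bdev_io_read/write marker pointers defined at the top of this
 * file rather than a real bdev_io.
 */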
2802 static void 2803 test_zcopy_read(void) 2804 { 2805 struct spdk_nvmf_request req = {}; 2806 struct spdk_nvmf_qpair qpair = {}; 2807 struct spdk_nvmf_transport transport = {}; 2808 struct spdk_nvme_cmd cmd = {}; 2809 union nvmf_c2h_msg rsp = {}; 2810 struct spdk_nvmf_ctrlr ctrlr = {}; 2811 struct spdk_nvmf_subsystem subsystem = {}; 2812 struct spdk_nvmf_ns ns = {}; 2813 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2814 enum spdk_nvme_ana_state ana_state[1]; 2815 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2816 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2817 2818 struct spdk_nvmf_poll_group group = {}; 2819 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2820 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2821 struct spdk_io_channel io_ch = {}; 2822 2823 ns.bdev = &bdev; 2824 ns.zcopy = true; 2825 ns.anagrpid = 1; 2826 2827 subsystem.id = 0; 2828 subsystem.max_nsid = 1; 2829 subsys_ns[0] = &ns; 2830 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2831 2832 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2833 2834 /* Enable controller */ 2835 ctrlr.vcprop.cc.bits.en = 1; 2836 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2837 ctrlr.listener = &listener; 2838 ctrlr.visible_ns = spdk_bit_array_create(1); 2839 spdk_bit_array_set(ctrlr.visible_ns, 0); 2840 2841 transport.opts.zcopy = true; 2842 2843 group.thread = spdk_get_thread(); 2844 group.num_sgroups = 1; 2845 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2846 sgroups.num_ns = 1; 2847 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2848 ns_info.channel = &io_ch; 2849 sgroups.ns_info = &ns_info; 2850 TAILQ_INIT(&sgroups.queued); 2851 group.sgroups = &sgroups; 2852 TAILQ_INIT(&qpair.outstanding); 2853 2854 qpair.ctrlr = &ctrlr; 2855 qpair.group = &group; 2856 qpair.transport = &transport; 2857 qpair.qid = 1; 2858 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2859 2860 cmd.nsid = 1; 2861 2862 req.qpair = &qpair; 2863 req.cmd = (union nvmf_h2c_msg *)&cmd; 2864 req.rsp = &rsp; 2865 cmd.opc = SPDK_NVME_OPC_READ; 2866 2867 /* Prepare for zcopy */ 2868 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2869 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2870 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2871 CU_ASSERT(ns_info.io_outstanding == 0); 2872 2873 /* Perform the zcopy start */ 2874 spdk_nvmf_request_zcopy_start(&req); 2875 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2876 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read); 2877 CU_ASSERT(qpair.outstanding.tqh_first == &req); 2878 CU_ASSERT(ns_info.io_outstanding == 1); 2879 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2880 2881 /* Perform the zcopy end */ 2882 spdk_nvmf_request_zcopy_end(&req, false); 2883 CU_ASSERT(req.zcopy_bdev_io == NULL); 2884 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 2885 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2886 CU_ASSERT(ns_info.io_outstanding == 0); 2887 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2888 2889 spdk_bit_array_free(&ctrlr.visible_ns); 2890 } 2891 2892 static void 2893 test_zcopy_write(void) 2894 { 2895 struct spdk_nvmf_request req = {}; 2896 struct spdk_nvmf_qpair qpair = {}; 2897 struct spdk_nvmf_transport transport = {}; 2898 struct spdk_nvme_cmd cmd = {}; 2899 union nvmf_c2h_msg rsp = {}; 2900 struct spdk_nvmf_ctrlr ctrlr = {}; 2901 struct spdk_nvmf_subsystem subsystem = {}; 2902 struct spdk_nvmf_ns ns = {}; 2903 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2904 enum spdk_nvme_ana_state ana_state[1]; 2905 struct 
spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2906 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2907 2908 struct spdk_nvmf_poll_group group = {}; 2909 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2910 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2911 struct spdk_io_channel io_ch = {}; 2912 2913 ns.bdev = &bdev; 2914 ns.zcopy = true; 2915 ns.anagrpid = 1; 2916 2917 subsystem.id = 0; 2918 subsystem.max_nsid = 1; 2919 subsys_ns[0] = &ns; 2920 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2921 2922 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2923 2924 /* Enable controller */ 2925 ctrlr.vcprop.cc.bits.en = 1; 2926 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2927 ctrlr.listener = &listener; 2928 ctrlr.visible_ns = spdk_bit_array_create(1); 2929 spdk_bit_array_set(ctrlr.visible_ns, 0); 2930 2931 transport.opts.zcopy = true; 2932 2933 group.thread = spdk_get_thread(); 2934 group.num_sgroups = 1; 2935 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2936 sgroups.num_ns = 1; 2937 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2938 ns_info.channel = &io_ch; 2939 sgroups.ns_info = &ns_info; 2940 TAILQ_INIT(&sgroups.queued); 2941 group.sgroups = &sgroups; 2942 TAILQ_INIT(&qpair.outstanding); 2943 2944 qpair.ctrlr = &ctrlr; 2945 qpair.group = &group; 2946 qpair.transport = &transport; 2947 qpair.qid = 1; 2948 qpair.state = SPDK_NVMF_QPAIR_ACTIVE; 2949 2950 cmd.nsid = 1; 2951 2952 req.qpair = &qpair; 2953 req.cmd = (union nvmf_h2c_msg *)&cmd; 2954 req.rsp = &rsp; 2955 cmd.opc = SPDK_NVME_OPC_WRITE; 2956 2957 /* Prepare for zcopy */ 2958 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2959 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2960 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2961 CU_ASSERT(ns_info.io_outstanding == 0); 2962 2963 /* Perform the zcopy start */ 2964 spdk_nvmf_request_zcopy_start(&req); 2965 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2966 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write); 2967 CU_ASSERT(qpair.outstanding.tqh_first == &req); 2968 CU_ASSERT(ns_info.io_outstanding == 1); 2969 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2970 2971 /* Perform the zcopy end */ 2972 spdk_nvmf_request_zcopy_end(&req, true); 2973 CU_ASSERT(req.zcopy_bdev_io == NULL); 2974 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 2975 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2976 CU_ASSERT(ns_info.io_outstanding == 0); 2977 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2978 2979 spdk_bit_array_free(&ctrlr.visible_ns); 2980 } 2981 2982 static void 2983 test_nvmf_property_set(void) 2984 { 2985 int rc; 2986 struct spdk_nvmf_request req = {}; 2987 struct spdk_nvmf_qpair qpair = {}; 2988 struct spdk_nvmf_ctrlr ctrlr = {}; 2989 union nvmf_h2c_msg cmd = {}; 2990 union nvmf_c2h_msg rsp = {}; 2991 2992 req.qpair = &qpair; 2993 qpair.ctrlr = &ctrlr; 2994 req.cmd = &cmd; 2995 req.rsp = &rsp; 2996 2997 /* Invalid parameters */ 2998 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 2999 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs); 3000 3001 rc = nvmf_property_set(&req); 3002 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3003 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 3004 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 3005 3006 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms); 3007 3008 rc = nvmf_property_get(&req); 3009 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3010 
CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 3011 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 3012 3013 /* Set cc with same property size */ 3014 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 3015 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc); 3016 3017 rc = nvmf_property_set(&req); 3018 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3019 3020 /* Emulate cc data */ 3021 ctrlr.vcprop.cc.raw = 0xDEADBEEF; 3022 3023 rc = nvmf_property_get(&req); 3024 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3025 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF); 3026 3027 /* Set asq with different property size */ 3028 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 3029 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 3030 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq); 3031 3032 rc = nvmf_property_set(&req); 3033 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3034 3035 /* Emulate asq data */ 3036 ctrlr.vcprop.asq = 0xAADDADBEEF; 3037 3038 rc = nvmf_property_get(&req); 3039 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3040 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF); 3041 } 3042 3043 static void 3044 test_nvmf_ctrlr_get_features_host_behavior_support(void) 3045 { 3046 int rc; 3047 struct spdk_nvmf_request req = {}; 3048 struct spdk_nvmf_qpair qpair = {}; 3049 struct spdk_nvmf_ctrlr ctrlr = {}; 3050 struct spdk_nvme_host_behavior behavior = {}; 3051 union nvmf_h2c_msg cmd = {}; 3052 union nvmf_c2h_msg rsp = {}; 3053 3054 qpair.ctrlr = &ctrlr; 3055 req.qpair = &qpair; 3056 req.cmd = &cmd; 3057 req.rsp = &rsp; 3058 3059 /* Invalid data */ 3060 req.length = sizeof(struct spdk_nvme_host_behavior); 3061 req.iovcnt = 0; 3062 3063 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3064 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3065 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3066 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3067 3068 /* Wrong structure length */ 3069 req.length = sizeof(struct spdk_nvme_host_behavior) - 1; 3070 SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length); 3071 3072 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3073 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3074 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3075 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3076 3077 /* Get Features Host Behavior Support Success */ 3078 req.length = sizeof(struct spdk_nvme_host_behavior); 3079 SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length); 3080 3081 ctrlr.acre_enabled = true; 3082 behavior.acre = false; 3083 3084 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3085 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3086 CU_ASSERT(behavior.acre == true); 3087 } 3088 3089 static void 3090 test_nvmf_ctrlr_set_features_host_behavior_support(void) 3091 { 3092 int rc; 3093 struct spdk_nvmf_request req = {}; 3094 struct spdk_nvmf_qpair qpair = {}; 3095 struct spdk_nvmf_ctrlr ctrlr = {}; 3096 struct spdk_nvme_host_behavior host_behavior = {}; 3097 union nvmf_h2c_msg cmd = {}; 3098 union nvmf_c2h_msg rsp = {}; 3099 3100 qpair.ctrlr = &ctrlr; 3101 req.qpair = &qpair; 3102 req.cmd = &cmd; 3103 req.rsp = &rsp; 3104 req.iov[0].iov_base = &host_behavior; 3105 req.iov[0].iov_len = sizeof(host_behavior); 3106 3107 /* Invalid iovcnt */ 3108 req.iovcnt = 0; 3109 rc = 
static void
test_nvmf_ctrlr_set_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior host_behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.iov[0].iov_base = &host_behavior;
	req.iov[0].iov_len = sizeof(host_behavior);

	/* Invalid iovcnt */
	req.iovcnt = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Invalid iov_len */
	req.iovcnt = 1;
	req.iov[0].iov_len = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* acre is false */
	host_behavior.acre = 0;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == false);

	/* acre is true */
	host_behavior.acre = 1;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == true);

	/* Invalid acre */
	host_behavior.acre = 2;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}
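
/*
 * Note on nvmf_ctrlr_set_features_host_behavior_support() above: the handler
 * reads struct spdk_nvme_host_behavior from req.iov[0]; acre 0/1 toggles
 * ctrlr.acre_enabled and any other value (2 in the last case) is rejected
 * with SPDK_NVME_SC_INVALID_FIELD. Each case re-seeds rc and the completion
 * status first, so a stale value from the previous call cannot mask a
 * missed update.
 */
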
static void
test_nvmf_ctrlr_ns_attachment(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns1 = {
		.nsid = 1,
		.always_visible = false
	};
	struct spdk_nvmf_ns ns3 = {
		.nsid = 3,
		.always_visible = false
	};
	struct spdk_nvmf_ctrlr ctrlrA = {
		.subsys = &subsystem
	};
	struct spdk_nvmf_ctrlr ctrlrB = {
		.subsys = &subsystem
	};
	struct spdk_nvmf_host *host;
	uint32_t nsid;

	subsystem.max_nsid = 3;
	subsystem.ns = calloc(subsystem.max_nsid, sizeof(subsystem.ns));
	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);

	/* nsid = 2 -> unallocated, nsid = 1,3 -> allocated */
	subsystem.ns[0] = &ns1;
	subsystem.ns[2] = &ns3;

	snprintf(ctrlrA.hostnqn, sizeof(ctrlrA.hostnqn), "nqn.2016-06.io.spdk:host1");
	ctrlrA.visible_ns = spdk_bit_array_create(subsystem.max_nsid);
	SPDK_CU_ASSERT_FATAL(ctrlrA.visible_ns != NULL);
	snprintf(ctrlrB.hostnqn, sizeof(ctrlrB.hostnqn), "nqn.2016-06.io.spdk:host2");
	ctrlrB.visible_ns = spdk_bit_array_create(subsystem.max_nsid);
	SPDK_CU_ASSERT_FATAL(ctrlrB.visible_ns != NULL);

	/* Do not auto attach and no cold attach of any ctrlr */
	nsid = 1;
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	nvmf_ctrlr_init_visible_ns(&ctrlrA);
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);

	/* Cold attach ctrlrA to namespace 1 */
	nsid = 1;
	host = calloc(1, sizeof(*host));
	SPDK_CU_ASSERT_FATAL(host != NULL);
	snprintf(host->nqn, sizeof(host->nqn), "%s", ctrlrA.hostnqn);
	TAILQ_INSERT_HEAD(&ns1.hosts, host, link);
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	nvmf_ctrlr_init_visible_ns(&ctrlrA);
	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);

	/* Detach ctrlrA from namespace 1 */
	nsid = 1;
	spdk_bit_array_clear(ctrlrA.visible_ns, nsid - 1);
	TAILQ_REMOVE(&ns1.hosts, host, link);
	free(host);

	/* Auto attach any ctrlr to namespace 1 */
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	ns1.always_visible = true;
	nvmf_ctrlr_init_visible_ns(&ctrlrA);
	nsid = 1;
	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	nvmf_ctrlr_init_visible_ns(&ctrlrB);
	nsid = 1;
	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);

	/* The bit arrays came from spdk_bit_array_create(), so release them
	 * with spdk_bit_array_free() rather than plain free(). */
	spdk_bit_array_free(&ctrlrA.visible_ns);
	spdk_bit_array_free(&ctrlrB.visible_ns);
	free(subsystem.ns);
}
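
/*
 * Note on test_nvmf_ctrlr_ns_attachment() above: ctrlr->visible_ns is a bit
 * array indexed by nsid - 1, and nvmf_ctrlr_init_visible_ns() recomputes it
 * from each namespace's host list plus its always_visible flag - first with
 * no hosts attached, then with ctrlrA cold-attached to namespace 1, and
 * finally with namespace 1 marked always visible to every controller.
 */
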
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);
	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);
	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
	CU_ADD_TEST(suite, test_multi_async_events);
	CU_ADD_TEST(suite, test_rae);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
	CU_ADD_TEST(suite, test_zcopy_read);
	CU_ADD_TEST(suite, test_zcopy_write);
	CU_ADD_TEST(suite, test_nvmf_property_set);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_ns_attachment);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}