/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/bdev_zone.h"
#include "spdk/nvme_spec.h"
#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
	bool zoned;
	uint32_t zone_size;
	uint32_t max_open_zones;
	uint32_t max_active_zones;
};

#define MAX_OPEN_ZONES 12
#define MAX_ACTIVE_ZONES 34
#define ZONE_SIZE 56

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;
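
/*
 * The three pointers above are sentinels, not real bdev_io objects: distinct,
 * easily recognizable bit patterns that the zcopy tests compare against
 * req->zcopy_bdev_io to tell which path (read, write, or failure) ran.
 * They are never dereferenced.
 */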

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);
DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool,
	    (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
	    MAX_ACTIVE_ZONES);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
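
/*
 * A note on the stubs: DEFINE_STUB() from spdk_internal/mock.h emits a
 * function whose return value a test can override at runtime with MOCK_SET()
 * and restore with MOCK_CLEAR(). As an illustrative sketch only (not the
 * exact macro expansion), the stub above behaves roughly like:
 *
 *	bool spdk_bdev_is_zoned(const struct spdk_bdev *bdev)
 *	{
 *		return g_is_zoned_mock_value;	// false until MOCK_SET() changes it
 *	}
 *
 * The zoned getters return the arbitrary MAX_ACTIVE_ZONES, MAX_OPEN_ZONES,
 * and ZONE_SIZE constants so that tests can assert those values are
 * propagated into the ZNS identify data.
 */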

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n),
	    false);
DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0);
DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r),
	    SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

void
nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, enum spdk_nvmf_qpair_state state)
{
	qpair->state = state;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair)
{
	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

bool
nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
{
	return ns->ptpl_file != NULL;
}
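
/*
 * The definitions above replace symbols that nvmf/ctrlr.c (#included at the
 * top of this file so its static functions are visible to the tests) would
 * otherwise resolve at link time. They keep just enough behavior for the
 * assertions: identify data is synthesized from the fake struct spdk_bdev,
 * namespace iteration walks the subsystem's ns array, and the zcopy hooks
 * record a sentinel instead of issuing real bdev I/O. The test cases start
 * below.
 */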
static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.length = sizeof(data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &data, req.length);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.iovcnt = 0;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	bool ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	TAILQ_INIT(&req_qpair.outstanding);
	req_qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_check_qpair_active(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT(ret == false);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}
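
/*
 * test_connect() drives nvmf_ctrlr_cmd_connect() through its validation
 * paths: data length, RECFMT, unknown subsystem, unterminated HOSTNQN,
 * disallowed host, SQSIZE bounds, CNTLID matching, discovery-subsystem
 * rules, CC.EN/IOSQES/IOCQES checks, and QID reuse. In the failure cases,
 * IPO is the byte offset of the offending field: 42 (QID) and 44 (SQSIZE)
 * within the Connect command, and 16 (CNTLID), 256 (SUBNQN), and 512
 * (HOSTNQN) within the Connect data. Each case restores whatever it
 * perturbed so the cases stay independent.
 */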
static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_ns *ns_arr[1] = { NULL };
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_CONNECTING;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
	subsystem.ns = ns_arr;
	subsystem.max_nsid = 1;

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.length = sizeof(connect_data);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);
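
	/*
	 * Pattern for each case below: bump the subsystem poll group's
	 * mgmt_io_outstanding and queue the request on qpair.outstanding, as
	 * the transport layer normally would, then let poll_threads() drive
	 * the asynchronous connect and assert the counter drained to zero.
	 */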

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;
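
	/*
	 * The remaining cases exercise I/O queue connects, which look up the
	 * existing controller by CNTLID via nvmf_subsystem_get_ctrlr(); that
	 * function is stubbed here, so MOCK_SET() decides whether the lookup
	 * succeeds.
	 */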

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* Admin connect (QID 0) to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;

	/* Admin connect (QID 0) to discovery controller with keep-alive-timeout == 0.
	 * Then, a fixed timeout value is set to keep-alive-timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, true);
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr->visible_ns);
	free(qpair.ctrlr);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	MOCK_SET(spdk_nvmf_subsystem_is_discovery, false);

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with qid that is too large */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect the duplicate QID and schedule a retry,
	 * so for now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now advance the clock, so that the retry poller executes. */
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with temporarily duplicated queue ID. This covers the
	 * race where the qpair_mask bit may not yet be cleared, even though
	 * the initiator has closed the connection. See issue #2955. */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	poll_threads();
	/* First time, it will detect the duplicate QID and schedule a retry,
	 * so for now we should expect the response to still be all zeroes.
	 */
	CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp)));
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1);

	/* Now advance the clock, so that the retry poller executes. */
	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
	spdk_delay_us(DUPLICATE_QID_RETRY_US * 2);
	poll_threads();
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED);
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	qpair.ctrlr = NULL;

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_CONNECTING;

	/* I/O connect when admin qpair was destroyed */
	ctrlr.admin_qpair = NULL;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.admin_qpair = &admin_qpair;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}
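
/*
 * test_get_ns_id_desc_list() checks the Identify Namespace Identification
 * Descriptor list (CNS 03h). Each descriptor is a 4-byte header (NIDT, NIDL,
 * two reserved bytes) followed by NIDL bytes of identifier, so an EUI-64
 * entry occupies bytes 0..11, an NGUID placed after it starts at byte 12,
 * and so on; hence the fixed buffer offsets asserted below.
 */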
static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();
	ctrlr.visible_ns = spdk_bit_array_create(1);

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.length = sizeof(buf);
	SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns is inactive */
	spdk_bit_array_clear(ctrlr.visible_ns, 0);
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);

	spdk_bit_array_free(&ctrlr.visible_ns);
}

static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem,
		.admin_qpair = &admin_qpair,
	};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	ctrlr.visible_ns = spdk_bit_array_create(3);
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	spdk_bit_array_set(ctrlr.visible_ns, 2);

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 1 */
	spdk_bit_array_clear(ctrlr.visible_ns, 0);
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid but unallocated NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	spdk_bit_array_free(&ctrlr.visible_ns);
}
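
/*
 * test_identify_ns_iocs_specific() covers Identify (CNS 05h) with an I/O
 * command set specific data structure. For the ZNS CSI, MAR and MOR are
 * 0's-based values per the ZNS specification, which is why the stubbed
 * MAX_ACTIVE_ZONES/MAX_OPEN_ZONES come back decremented by one; for the
 * NVM CSI the structure is expected to be all zeroes.
 */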
static void
test_identify_ns_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ns_data nsdata = {};
	struct spdk_bdev bdev[2] = {
		{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE,
		 .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES},
		{.blockcnt = 5678}
	};
	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};

	ctrlr.visible_ns = spdk_bit_array_create(3);
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	spdk_bit_array_set(ctrlr.visible_ns, 1);
	spdk_bit_array_set(ctrlr.visible_ns, 2);
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* Invalid ZNS NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid ZNS NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1);
	CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1);
	CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1);
	CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE);
	nsdata.ozcs.read_across_zone_boundaries = 0;
	nsdata.mar = 0;
	nsdata.mor = 0;
	nsdata.lbafe[0].zsze = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* Valid NVM NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NVM NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	spdk_bit_array_free(&ctrlr.visible_ns);
}
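
/*
 * test_set_get_features() pokes at a few feature handlers directly:
 * reservation persist-through-power-loss (PTPL), temperature threshold
 * (whose TMPSEL/THSEL fields have reserved encodings that must be rejected),
 * and error recovery, where setting DULBE is rejected while clearing it
 * succeeds.
 */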
static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem,
		.admin_qpair = &admin_qpair,
		.listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ctrlr.visible_ns = spdk_bit_array_create(3);
	spdk_bit_array_set(ctrlr.visible_ns, 0);
	spdk_bit_array_set(ctrlr.visible_ns, 2);
	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	spdk_bit_array_free(&ctrlr.visible_ns);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *       --------------------------------------
 *      |             NAMESPACE 1              |
 *       --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
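
/*
 * ut_reservation_init() registers hosts A, B, and C on the shared namespace
 * and arms g_ns_info with the reservation type under test; individual tests
 * then pick a holder and call nvmf_ns_reservation_request_check(), which
 * returns 0 when the command is allowed and a negative value (with a
 * RESERVATION_CONFLICT status) when it must be rejected.
 */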
static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	spdk_uuid_set_null(&g_ns_info.reg_hostid[2]);

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}
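
/*
 * Exclusive Access: only the reservation holder may read or write the
 * namespace, so media access from any other host conflicts. Reservation
 * management commands (e.g. Reservation Release below) are not media access
 * commands, so the check still lets a valid registrant issue them.
 */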
1499 /* Test Case: Issue a Reservation Release command from a valid Registrant */ 1500 cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE; 1501 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1502 SPDK_CU_ASSERT_FATAL(rc == 0); 1503 } 1504 1505 static void 1506 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1507 { 1508 struct spdk_nvmf_request req = {}; 1509 union nvmf_h2c_msg cmd = {}; 1510 union nvmf_c2h_msg rsp = {}; 1511 int rc; 1512 1513 req.cmd = &cmd; 1514 req.rsp = &rsp; 1515 1516 /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */ 1517 ut_reservation_init(rtype); 1518 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1519 1520 /* Test Case: Issue a Read command from Host A and Host C */ 1521 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1522 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1523 SPDK_CU_ASSERT_FATAL(rc == 0); 1524 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1525 SPDK_CU_ASSERT_FATAL(rc == 0); 1526 1527 /* Test Case: Issue a DSM Write command from Host A and Host C */ 1528 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1529 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1530 SPDK_CU_ASSERT_FATAL(rc == 0); 1531 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1532 SPDK_CU_ASSERT_FATAL(rc == 0); 1533 1534 /* Unregister Host C */ 1535 spdk_uuid_set_null(&g_ns_info.reg_hostid[2]); 1536 1537 /* Test Case: Read and Write commands from non-registrant Host C */ 1538 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1539 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1540 SPDK_CU_ASSERT_FATAL(rc == 0); 1541 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1542 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1543 SPDK_CU_ASSERT_FATAL(rc < 0); 1544 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1545 } 1546 1547 static void 1548 test_reservation_write_exclusive_regs_only_and_all_regs(void) 1549 { 1550 _test_reservation_write_exclusive_regs_only_and_all_regs( 1551 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1552 _test_reservation_write_exclusive_regs_only_and_all_regs( 1553 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS); 1554 } 1555 1556 static void 1557 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1558 { 1559 struct spdk_nvmf_request req = {}; 1560 union nvmf_h2c_msg cmd = {}; 1561 union nvmf_c2h_msg rsp = {}; 1562 int rc; 1563 1564 req.cmd = &cmd; 1565 req.rsp = &rsp; 1566 1567 /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */ 1568 ut_reservation_init(rtype); 1569 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1570 1571 /* Test Case: Issue a Write command from Host B */ 1572 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1573 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1574 SPDK_CU_ASSERT_FATAL(rc == 0); 1575 1576 /* Unregister Host B */ 1577 spdk_uuid_set_null(&g_ns_info.reg_hostid[1]); 1578 1579 /* Test Case: Issue a Read command from Host B */ 1580 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1581 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1582 SPDK_CU_ASSERT_FATAL(rc < 0); 1583 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1584 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1585 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1586 
SPDK_CU_ASSERT_FATAL(rc < 0); 1587 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1588 } 1589 1590 static void 1591 test_reservation_exclusive_access_regs_only_and_all_regs(void) 1592 { 1593 _test_reservation_exclusive_access_regs_only_and_all_regs( 1594 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY); 1595 _test_reservation_exclusive_access_regs_only_and_all_regs( 1596 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 1597 } 1598 1599 static void 1600 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1601 { 1602 STAILQ_INIT(&ctrlr->async_events); 1603 } 1604 1605 static void 1606 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1607 { 1608 struct spdk_nvmf_async_event_completion *event, *event_tmp; 1609 1610 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) { 1611 STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link); 1612 free(event); 1613 } 1614 } 1615 1616 static int 1617 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1618 { 1619 int num = 0; 1620 struct spdk_nvmf_async_event_completion *event; 1621 1622 STAILQ_FOREACH(event, &ctrlr->async_events, link) { 1623 num++; 1624 } 1625 return num; 1626 } 1627 1628 static void 1629 test_reservation_notification_log_page(void) 1630 { 1631 struct spdk_nvmf_ctrlr ctrlr; 1632 struct spdk_nvmf_qpair qpair; 1633 struct spdk_nvmf_ns ns; 1634 struct spdk_nvmf_request req = {}; 1635 union nvmf_h2c_msg cmd = {}; 1636 union nvmf_c2h_msg rsp = {}; 1637 union spdk_nvme_async_event_completion event = {}; 1638 struct spdk_nvme_reservation_notification_log logs[3]; 1639 struct iovec iov; 1640 1641 memset(&ctrlr, 0, sizeof(ctrlr)); 1642 ctrlr.thread = spdk_get_thread(); 1643 TAILQ_INIT(&ctrlr.log_head); 1644 init_pending_async_events(&ctrlr); 1645 ns.nsid = 1; 1646 1647 /* Test Case: Mask all the reservation notifications */ 1648 ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK | 1649 SPDK_NVME_RESERVATION_RELEASED_MASK | 1650 SPDK_NVME_RESERVATION_PREEMPTED_MASK; 1651 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1652 SPDK_NVME_REGISTRATION_PREEMPTED); 1653 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1654 SPDK_NVME_RESERVATION_RELEASED); 1655 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1656 SPDK_NVME_RESERVATION_PREEMPTED); 1657 poll_threads(); 1658 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head)); 1659 1660 /* Test Case: Unmask all the reservation notifications, 1661 * 3 log pages are generated, and AER was triggered. 
1662 */ 1663 ns.mask = 0; 1664 ctrlr.num_avail_log_pages = 0; 1665 req.cmd = &cmd; 1666 req.rsp = &rsp; 1667 ctrlr.aer_req[0] = &req; 1668 ctrlr.nr_aer_reqs = 1; 1669 req.qpair = &qpair; 1670 TAILQ_INIT(&qpair.outstanding); 1671 qpair.ctrlr = NULL; 1672 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 1673 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 1674 1675 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1676 SPDK_NVME_REGISTRATION_PREEMPTED); 1677 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1678 SPDK_NVME_RESERVATION_RELEASED); 1679 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1680 SPDK_NVME_RESERVATION_PREEMPTED); 1681 poll_threads(); 1682 event.raw = rsp.nvme_cpl.cdw0; 1683 SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO); 1684 SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL); 1685 SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION); 1686 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3); 1687 1688 /* Test Case: Get Log Page to clear the log pages */ 1689 iov.iov_base = &logs[0]; 1690 iov.iov_len = sizeof(logs); 1691 nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0); 1692 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0); 1693 1694 cleanup_pending_async_events(&ctrlr); 1695 } 1696 1697 static void 1698 test_get_dif_ctx(void) 1699 { 1700 struct spdk_nvmf_subsystem subsystem = {}; 1701 struct spdk_nvmf_request req = {}; 1702 struct spdk_nvmf_qpair qpair = {}; 1703 struct spdk_nvmf_ctrlr ctrlr = {}; 1704 struct spdk_nvmf_ns ns = {}; 1705 struct spdk_nvmf_ns *_ns = NULL; 1706 struct spdk_bdev bdev = {}; 1707 union nvmf_h2c_msg cmd = {}; 1708 struct spdk_dif_ctx dif_ctx = {}; 1709 bool ret; 1710 1711 ctrlr.subsys = &subsystem; 1712 ctrlr.visible_ns = spdk_bit_array_create(1); 1713 spdk_bit_array_set(ctrlr.visible_ns, 0); 1714 1715 qpair.ctrlr = &ctrlr; 1716 1717 req.qpair = &qpair; 1718 req.cmd = &cmd; 1719 1720 ns.bdev = &bdev; 1721 1722 ctrlr.dif_insert_or_strip = false; 1723 1724 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1725 CU_ASSERT(ret == false); 1726 1727 ctrlr.dif_insert_or_strip = true; 1728 qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED; 1729 1730 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1731 CU_ASSERT(ret == false); 1732 1733 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 1734 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 1735 1736 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1737 CU_ASSERT(ret == false); 1738 1739 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH; 1740 1741 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1742 CU_ASSERT(ret == false); 1743 1744 qpair.qid = 1; 1745 1746 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1747 CU_ASSERT(ret == false); 1748 1749 cmd.nvme_cmd.nsid = 1; 1750 1751 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1752 CU_ASSERT(ret == false); 1753 1754 subsystem.max_nsid = 1; 1755 subsystem.ns = &_ns; 1756 subsystem.ns[0] = &ns; 1757 1758 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1759 CU_ASSERT(ret == false); 1760 1761 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE; 1762 1763 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1764 CU_ASSERT(ret == true); 1765 1766 spdk_bit_array_free(&ctrlr.visible_ns); 1767 } 1768 1769 static void 1770 test_identify_ctrlr(void) 1771 { 1772 struct spdk_nvmf_tgt tgt = {}; 1773 struct spdk_nvmf_subsystem subsystem = { 1774 .subtype = SPDK_NVMF_SUBTYPE_NVME, 1775 .tgt = &tgt, 1776 }; 1777 struct 
spdk_nvmf_transport_ops tops = {}; 1778 struct spdk_nvmf_transport transport = { 1779 .ops = &tops, 1780 .opts = { 1781 .in_capsule_data_size = 4096, 1782 }, 1783 }; 1784 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 1785 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 1786 struct spdk_nvme_ctrlr_data cdata = {}; 1787 uint32_t expected_ioccsz; 1788 1789 nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata); 1790 1791 /* Check ioccsz, TCP transport */ 1792 tops.type = SPDK_NVME_TRANSPORT_TCP; 1793 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1794 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1795 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1796 1797 /* Check ioccsz, RDMA transport */ 1798 tops.type = SPDK_NVME_TRANSPORT_RDMA; 1799 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1800 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1801 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1802 1803 /* Check ioccsz, TCP transport with dif_insert_or_strip */ 1804 tops.type = SPDK_NVME_TRANSPORT_TCP; 1805 ctrlr.dif_insert_or_strip = true; 1806 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1807 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1808 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1809 } 1810 1811 static void 1812 test_identify_ctrlr_iocs_specific(void) 1813 { 1814 struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 }; 1815 struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 }; 1816 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop }; 1817 struct spdk_nvme_cmd cmd = {}; 1818 struct spdk_nvme_cpl rsp = {}; 1819 struct spdk_nvme_zns_ctrlr_data ctrlr_data = {}; 1820 struct spdk_nvme_nvm_ctrlr_data cdata_nvm = {}; 1821 1822 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS; 1823 1824 /* ZNS max_zone_append_size_kib no limit */ 1825 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1826 memset(&rsp, 0, sizeof(rsp)); 1827 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1828 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1829 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1830 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1831 CU_ASSERT(ctrlr_data.zasl == 0); 1832 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1833 1834 /* ZNS max_zone_append_size_kib = 4096 */ 1835 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1836 memset(&rsp, 0, sizeof(rsp)); 1837 subsystem.max_zone_append_size_kib = 4096; 1838 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1839 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1840 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1841 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1842 CU_ASSERT(ctrlr_data.zasl == 0); 1843 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1844 1845 /* ZNS max_zone_append_size_kib = 60000 */ 1846 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1847 memset(&rsp, 0, sizeof(rsp)); 1848 subsystem.max_zone_append_size_kib = 60000; 1849 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1850 &ctrlr_data, sizeof(ctrlr_data)) == 
SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1851 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1852 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1853 CU_ASSERT(ctrlr_data.zasl == 3); 1854 ctrlr_data.zasl = 0; 1855 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1856 1857 /* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */ 1858 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1859 memset(&rsp, 0, sizeof(rsp)); 1860 ctrlr.vcprop.cap.bits.mpsmin = 2; 1861 subsystem.max_zone_append_size_kib = 60000; 1862 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1863 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1864 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1865 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1866 CU_ASSERT(ctrlr_data.zasl == 1); 1867 ctrlr_data.zasl = 0; 1868 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1869 ctrlr.vcprop.cap.bits.mpsmin = 0; 1870 1871 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM; 1872 1873 /* NVM max_discard_size_kib = 1024; 1874 * max_write_zeroes_size_kib = 1024; 1875 * mpsmin = 0; 1876 */ 1877 memset(&cdata_nvm, 0xFF, sizeof(cdata_nvm)); 1878 memset(&rsp, 0, sizeof(rsp)); 1879 subsystem.max_discard_size_kib = (uint64_t)1024; 1880 subsystem.max_write_zeroes_size_kib = (uint64_t)1024; 1881 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1882 &cdata_nvm, sizeof(cdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1883 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1884 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1885 CU_ASSERT(cdata_nvm.wzsl == 8); 1886 CU_ASSERT(cdata_nvm.dmrsl == 2048); 1887 CU_ASSERT(cdata_nvm.dmrl == 1); 1888 } 1889 1890 static int 1891 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req) 1892 { 1893 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 1894 1895 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1896 }; 1897 1898 static void 1899 test_custom_admin_cmd(void) 1900 { 1901 struct spdk_nvmf_subsystem subsystem; 1902 struct spdk_nvmf_qpair qpair; 1903 struct spdk_nvmf_ctrlr ctrlr; 1904 struct spdk_nvmf_request req; 1905 struct spdk_nvmf_ns *ns_ptrs[1]; 1906 struct spdk_nvmf_ns ns; 1907 union nvmf_h2c_msg cmd; 1908 union nvmf_c2h_msg rsp; 1909 struct spdk_bdev bdev; 1910 uint8_t buf[4096]; 1911 int rc; 1912 1913 memset(&subsystem, 0, sizeof(subsystem)); 1914 ns_ptrs[0] = &ns; 1915 subsystem.ns = ns_ptrs; 1916 subsystem.max_nsid = 1; 1917 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1918 1919 memset(&ns, 0, sizeof(ns)); 1920 ns.opts.nsid = 1; 1921 ns.bdev = &bdev; 1922 1923 memset(&qpair, 0, sizeof(qpair)); 1924 qpair.ctrlr = &ctrlr; 1925 1926 memset(&ctrlr, 0, sizeof(ctrlr)); 1927 ctrlr.subsys = &subsystem; 1928 ctrlr.vcprop.cc.bits.en = 1; 1929 ctrlr.thread = spdk_get_thread(); 1930 1931 memset(&req, 0, sizeof(req)); 1932 req.qpair = &qpair; 1933 req.cmd = &cmd; 1934 req.rsp = &rsp; 1935 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 1936 req.length = sizeof(buf); 1937 SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length); 1938 1939 memset(&cmd, 0, sizeof(cmd)); 1940 cmd.nvme_cmd.opc = 0xc1; 1941 cmd.nvme_cmd.nsid = 0; 1942 memset(&rsp, 0, sizeof(rsp)); 1943 1944 spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr); 1945 1946 /* Ensure that our hdlr is being called */ 1947 rc = nvmf_ctrlr_process_admin_cmd(&req); 1948 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1949 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1950 CU_ASSERT(rsp.nvme_cpl.status.sc == 
SPDK_NVME_SC_SUCCESS); 1951 } 1952 1953 static void 1954 test_fused_compare_and_write(void) 1955 { 1956 struct spdk_nvmf_request req = {}; 1957 struct spdk_nvmf_qpair qpair = {}; 1958 struct spdk_nvme_cmd cmd = {}; 1959 union nvmf_c2h_msg rsp = {}; 1960 struct spdk_nvmf_ctrlr ctrlr = {}; 1961 struct spdk_nvmf_subsystem subsystem = {}; 1962 struct spdk_nvmf_ns ns = {}; 1963 struct spdk_nvmf_ns *subsys_ns[1] = {}; 1964 enum spdk_nvme_ana_state ana_state[1]; 1965 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 1966 struct spdk_bdev bdev = {}; 1967 1968 struct spdk_nvmf_poll_group group = {}; 1969 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1970 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 1971 struct spdk_io_channel io_ch = {}; 1972 1973 ns.bdev = &bdev; 1974 ns.anagrpid = 1; 1975 1976 subsystem.id = 0; 1977 subsystem.max_nsid = 1; 1978 subsys_ns[0] = &ns; 1979 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 1980 1981 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1982 1983 /* Enable controller */ 1984 ctrlr.vcprop.cc.bits.en = 1; 1985 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 1986 ctrlr.listener = &listener; 1987 ctrlr.visible_ns = spdk_bit_array_create(1); 1988 spdk_bit_array_set(ctrlr.visible_ns, 0); 1989 1990 group.num_sgroups = 1; 1991 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1992 sgroups.num_ns = 1; 1993 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1994 ns_info.channel = &io_ch; 1995 sgroups.ns_info = &ns_info; 1996 TAILQ_INIT(&sgroups.queued); 1997 group.sgroups = &sgroups; 1998 TAILQ_INIT(&qpair.outstanding); 1999 2000 qpair.ctrlr = &ctrlr; 2001 qpair.group = &group; 2002 qpair.qid = 1; 2003 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2004 2005 cmd.nsid = 1; 2006 2007 req.qpair = &qpair; 2008 req.cmd = (union nvmf_h2c_msg *)&cmd; 2009 req.rsp = &rsp; 2010 2011 /* SUCCESS/SUCCESS */ 2012 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2013 cmd.opc = SPDK_NVME_OPC_COMPARE; 2014 2015 spdk_nvmf_request_exec(&req); 2016 CU_ASSERT(qpair.first_fused_req != NULL); 2017 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2018 2019 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2020 cmd.opc = SPDK_NVME_OPC_WRITE; 2021 2022 spdk_nvmf_request_exec(&req); 2023 CU_ASSERT(qpair.first_fused_req == NULL); 2024 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2025 2026 /* Wrong sequence */ 2027 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2028 cmd.opc = SPDK_NVME_OPC_WRITE; 2029 2030 spdk_nvmf_request_exec(&req); 2031 CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status)); 2032 CU_ASSERT(qpair.first_fused_req == NULL); 2033 2034 /* Write as FUSE_FIRST (Wrong op code) */ 2035 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2036 cmd.opc = SPDK_NVME_OPC_WRITE; 2037 2038 spdk_nvmf_request_exec(&req); 2039 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 2040 CU_ASSERT(qpair.first_fused_req == NULL); 2041 2042 /* Compare as FUSE_SECOND (Wrong op code) */ 2043 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2044 cmd.opc = SPDK_NVME_OPC_COMPARE; 2045 2046 spdk_nvmf_request_exec(&req); 2047 CU_ASSERT(qpair.first_fused_req != NULL); 2048 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2049 2050 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2051 cmd.opc = SPDK_NVME_OPC_COMPARE; 2052 2053 spdk_nvmf_request_exec(&req); 2054 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 2055 CU_ASSERT(qpair.first_fused_req == NULL); 2056 2057 spdk_bit_array_free(&ctrlr.visible_ns); 2058 } 2059 2060 static void 2061 test_multi_async_event_reqs(void) 2062 { 2063 struct 
spdk_nvmf_subsystem subsystem = {}; 2064 struct spdk_nvmf_qpair qpair = {}; 2065 struct spdk_nvmf_ctrlr ctrlr = {}; 2066 struct spdk_nvmf_request req[5] = {}; 2067 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2068 struct spdk_nvmf_ns ns = {}; 2069 union nvmf_h2c_msg cmd[5] = {}; 2070 union nvmf_c2h_msg rsp[5] = {}; 2071 2072 struct spdk_nvmf_poll_group group = {}; 2073 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2074 2075 int i; 2076 2077 ns_ptrs[0] = &ns; 2078 subsystem.ns = ns_ptrs; 2079 subsystem.max_nsid = 1; 2080 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2081 2082 ns.opts.nsid = 1; 2083 group.sgroups = &sgroups; 2084 2085 qpair.ctrlr = &ctrlr; 2086 qpair.group = &group; 2087 TAILQ_INIT(&qpair.outstanding); 2088 2089 ctrlr.subsys = &subsystem; 2090 ctrlr.vcprop.cc.bits.en = 1; 2091 ctrlr.thread = spdk_get_thread(); 2092 2093 for (i = 0; i < 5; i++) { 2094 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2095 cmd[i].nvme_cmd.nsid = 1; 2096 cmd[i].nvme_cmd.cid = i; 2097 2098 req[i].qpair = &qpair; 2099 req[i].cmd = &cmd[i]; 2100 req[i].rsp = &rsp[i]; 2101 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 2102 } 2103 2104 /* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */ 2105 sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS; 2106 for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) { 2107 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 2108 CU_ASSERT(ctrlr.nr_aer_reqs == i + 1); 2109 } 2110 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2111 2112 /* Exceeding the SPDK_NVMF_MAX_ASYNC_EVENTS reports error */ 2113 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2114 CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS); 2115 CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 2116 CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED); 2117 2118 /* Verify that the aer_reqs array stays contiguous when a request in the middle is aborted */ 2119 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true); 2120 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 2121 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 2122 CU_ASSERT(ctrlr.aer_req[2] == &req[3]); 2123 2124 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true); 2125 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 2126 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 2127 CU_ASSERT(ctrlr.aer_req[2] == NULL); 2128 CU_ASSERT(ctrlr.nr_aer_reqs == 2); 2129 2130 TAILQ_REMOVE(&qpair.outstanding, &req[0], link); 2131 TAILQ_REMOVE(&qpair.outstanding, &req[1], link); 2132 } 2133 2134 static void 2135 test_get_ana_log_page_one_ns_per_anagrp(void) 2136 { 2137 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t)) 2138 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE) 2139 uint32_t ana_group[3]; 2140 struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group }; 2141 struct spdk_nvmf_ctrlr ctrlr = {}; 2142 enum spdk_nvme_ana_state ana_state[3]; 2143 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2144 struct spdk_nvmf_ns ns[3]; 2145 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]}; 2146 uint64_t offset; 2147 uint32_t length; 2148 int i; 2149 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2150 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2151 struct iovec iov, iovs[2]; 2152 struct spdk_nvme_ana_page *ana_hdr; 2153 char _ana_desc[UT_ANA_DESC_SIZE]; 2154 struct spdk_nvme_ana_group_descriptor *ana_desc; 2155 2156 subsystem.ns = ns_arr;
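/* Each namespace sits alone in its own ANA group (anagrpid == nsid below), so the expected log page is one header followed by three single-NSID descriptors. */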
2157 subsystem.max_nsid = 3; 2158 for (i = 0; i < 3; i++) { 2159 subsystem.ana_group[i] = 1; 2160 } 2161 ctrlr.subsys = &subsystem; 2162 ctrlr.listener = &listener; 2163 2164 for (i = 0; i < 3; i++) { 2165 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2166 } 2167 2168 for (i = 0; i < 3; i++) { 2169 ns_arr[i]->nsid = i + 1; 2170 ns_arr[i]->anagrpid = i + 1; 2171 } 2172 2173 /* create expected page */ 2174 ana_hdr = (void *)&expected_page[0]; 2175 ana_hdr->num_ana_group_desc = 3; 2176 ana_hdr->change_count = 0; 2177 2178 /* descriptor may be unaligned. So create data and then copy it to the location. */ 2179 ana_desc = (void *)_ana_desc; 2180 offset = sizeof(struct spdk_nvme_ana_page); 2181 2182 for (i = 0; i < 3; i++) { 2183 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 2184 ana_desc->ana_group_id = ns_arr[i]->nsid; 2185 ana_desc->num_of_nsid = 1; 2186 ana_desc->change_count = 0; 2187 ana_desc->ana_state = ctrlr.listener->ana_state[i]; 2188 ana_desc->nsid[0] = ns_arr[i]->nsid; 2189 memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE); 2190 offset += UT_ANA_DESC_SIZE; 2191 } 2192 2193 /* read entire actual log page */ 2194 offset = 0; 2195 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2196 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2197 iov.iov_base = &actual_page[offset]; 2198 iov.iov_len = length; 2199 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2200 offset += length; 2201 } 2202 2203 /* compare expected page and actual page */ 2204 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2205 2206 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2207 offset = 0; 2208 iovs[0].iov_base = &actual_page[offset]; 2209 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 2210 offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 2211 iovs[1].iov_base = &actual_page[offset]; 2212 iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset; 2213 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2214 2215 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2216 2217 #undef UT_ANA_DESC_SIZE 2218 #undef UT_ANA_LOG_PAGE_SIZE 2219 } 2220 2221 static void 2222 test_get_ana_log_page_multi_ns_per_anagrp(void) 2223 { 2224 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + \ 2225 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 + \ 2226 sizeof(uint32_t) * 5) 2227 struct spdk_nvmf_ns ns[5]; 2228 struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]}; 2229 uint32_t ana_group[5] = {0}; 2230 struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, }; 2231 enum spdk_nvme_ana_state ana_state[5]; 2232 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, }; 2233 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, }; 2234 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2235 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2236 struct iovec iov, iovs[2]; 2237 struct spdk_nvme_ana_page *ana_hdr; 2238 char _ana_desc[UT_ANA_LOG_PAGE_SIZE]; 2239 struct spdk_nvme_ana_group_descriptor *ana_desc; 2240 uint64_t offset; 2241 uint32_t length; 2242 int i; 2243 2244 subsystem.max_nsid = 5; 2245 subsystem.ana_group[1] = 3; 2246 subsystem.ana_group[2] = 2; 2247 for (i = 0; i < 5; i++) { 2248 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2249 } 2250 2251 for (i = 0; i < 5; i++) { 2252 ns_arr[i]->nsid = i + 1; 2253 } 2254 ns_arr[0]->anagrpid = 2; 2255 ns_arr[1]->anagrpid = 3; 2256 ns_arr[2]->anagrpid = 2; 2257 ns_arr[3]->anagrpid = 3; 
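/* Together with the final assignment below, ANA group 2 holds NSIDs 1, 3, 5 and group 3 holds NSIDs 2, 4, which is the layout the expected page encodes. */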
2258 ns_arr[4]->anagrpid = 2; 2259 2260 /* create expected page */ 2261 ana_hdr = (void *)&expected_page[0]; 2262 ana_hdr->num_ana_group_desc = 2; 2263 ana_hdr->change_count = 0; 2264 2265 /* descriptor may be unaligned. So create data and then copy it to the location. */ 2266 ana_desc = (void *)_ana_desc; 2267 offset = sizeof(struct spdk_nvme_ana_page); 2268 2269 memset(_ana_desc, 0, sizeof(_ana_desc)); 2270 ana_desc->ana_group_id = 2; 2271 ana_desc->num_of_nsid = 3; 2272 ana_desc->change_count = 0; 2273 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2274 ana_desc->nsid[0] = 1; 2275 ana_desc->nsid[1] = 3; 2276 ana_desc->nsid[2] = 5; 2277 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 2278 sizeof(uint32_t) * 3); 2279 offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3; 2280 2281 memset(_ana_desc, 0, sizeof(_ana_desc)); 2282 ana_desc->ana_group_id = 3; 2283 ana_desc->num_of_nsid = 2; 2284 ana_desc->change_count = 0; 2285 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2286 ana_desc->nsid[0] = 2; 2287 ana_desc->nsid[1] = 4; 2288 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 2289 sizeof(uint32_t) * 2); 2290 2291 /* read entire actual log page, and compare expected page and actual page. */ 2292 offset = 0; 2293 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2294 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2295 iov.iov_base = &actual_page[offset]; 2296 iov.iov_len = length; 2297 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2298 offset += length; 2299 } 2300 2301 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2302 2303 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2304 offset = 0; 2305 iovs[0].iov_base = &actual_page[offset]; 2306 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2307 offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2308 iovs[1].iov_base = &actual_page[offset]; 2309 iovs[1].iov_len = sizeof(uint32_t) * 5; 2310 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2311 2312 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2313 2314 #undef UT_ANA_LOG_PAGE_SIZE 2315 } 2316 static void 2317 test_multi_async_events(void) 2318 { 2319 struct spdk_nvmf_subsystem subsystem = {}; 2320 struct spdk_nvmf_qpair qpair = {}; 2321 struct spdk_nvmf_ctrlr ctrlr = {}; 2322 struct spdk_nvmf_request req[4] = {}; 2323 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2324 struct spdk_nvmf_ns ns = {}; 2325 union nvmf_h2c_msg cmd[4] = {}; 2326 union nvmf_c2h_msg rsp[4] = {}; 2327 union spdk_nvme_async_event_completion event = {}; 2328 struct spdk_nvmf_poll_group group = {}; 2329 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2330 int i; 2331 2332 ns_ptrs[0] = &ns; 2333 subsystem.ns = ns_ptrs; 2334 subsystem.max_nsid = 1; 2335 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2336 2337 ns.opts.nsid = 1; 2338 group.sgroups = &sgroups; 2339 2340 qpair.ctrlr = &ctrlr; 2341 qpair.group = &group; 2342 TAILQ_INIT(&qpair.outstanding); 2343 2344 ctrlr.subsys = &subsystem; 2345 ctrlr.vcprop.cc.bits.en = 1; 2346 ctrlr.thread = spdk_get_thread(); 2347 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2348 ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1; 2349 ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1; 2350 init_pending_async_events(&ctrlr); 2351 2352 /* Target queue pending events when there is no outstanding AER request */ 2353 
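/* Each notice is queued on ctrlr.async_events; the first three AERs posted afterwards complete immediately by consuming one queued event apiece, and only the fourth remains outstanding. */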
nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2354 nvmf_ctrlr_async_event_ana_change_notice(&ctrlr); 2355 nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr); 2356 2357 for (i = 0; i < 4; i++) { 2358 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2359 cmd[i].nvme_cmd.nsid = 1; 2360 cmd[i].nvme_cmd.cid = i; 2361 2362 req[i].qpair = &qpair; 2363 req[i].cmd = &cmd[i]; 2364 req[i].rsp = &rsp[i]; 2365 2366 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 2367 2368 sgroups.mgmt_io_outstanding = 1; 2369 if (i < 3) { 2370 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2371 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2372 CU_ASSERT(ctrlr.nr_aer_reqs == 0); 2373 } else { 2374 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 2375 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2376 CU_ASSERT(ctrlr.nr_aer_reqs == 1); 2377 } 2378 } 2379 2380 event.raw = rsp[0].nvme_cpl.cdw0; 2381 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2382 event.raw = rsp[1].nvme_cpl.cdw0; 2383 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE); 2384 event.raw = rsp[2].nvme_cpl.cdw0; 2385 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE); 2386 2387 cleanup_pending_async_events(&ctrlr); 2388 } 2389 2390 static void 2391 test_rae(void) 2392 { 2393 struct spdk_nvmf_subsystem subsystem = {}; 2394 struct spdk_nvmf_qpair qpair = {}; 2395 struct spdk_nvmf_ctrlr ctrlr = {}; 2396 struct spdk_nvmf_request req[3] = {}; 2397 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2398 struct spdk_nvmf_ns ns = {}; 2399 union nvmf_h2c_msg cmd[3] = {}; 2400 union nvmf_c2h_msg rsp[3] = {}; 2401 union spdk_nvme_async_event_completion event = {}; 2402 struct spdk_nvmf_poll_group group = {}; 2403 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2404 int i; 2405 char data[4096]; 2406 2407 ns_ptrs[0] = &ns; 2408 subsystem.ns = ns_ptrs; 2409 subsystem.max_nsid = 1; 2410 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2411 2412 ns.opts.nsid = 1; 2413 group.sgroups = &sgroups; 2414 2415 qpair.ctrlr = &ctrlr; 2416 qpair.group = &group; 2417 TAILQ_INIT(&qpair.outstanding); 2418 2419 ctrlr.subsys = &subsystem; 2420 ctrlr.vcprop.cc.bits.en = 1; 2421 ctrlr.thread = spdk_get_thread(); 2422 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2423 init_pending_async_events(&ctrlr); 2424 2425 /* Target queue pending events when there is no outstanding AER request */ 2426 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2427 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2428 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2429 /* only one event will be queued before RAE is clear */ 2430 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2431 2432 req[0].qpair = &qpair; 2433 req[0].cmd = &cmd[0]; 2434 req[0].rsp = &rsp[0]; 2435 cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2436 cmd[0].nvme_cmd.nsid = 1; 2437 cmd[0].nvme_cmd.cid = 0; 2438 2439 for (i = 1; i < 3; i++) { 2440 req[i].qpair = &qpair; 2441 req[i].cmd = &cmd[i]; 2442 req[i].rsp = &rsp[i]; 2443 req[i].length = sizeof(data); 2444 SPDK_IOV_ONE(req[i].iov, &req[i].iovcnt, &data, req[i].length); 2445 2446 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 2447 cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid = 2448 SPDK_NVME_LOG_CHANGED_NS_LIST; 2449 cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl = 2450 spdk_nvme_bytes_to_numd(req[i].length); 2451 cmd[i].nvme_cmd.cid = i; 2452 } 2453 cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1; 
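/* req[1] keeps RAE set, so reading the log must not reset the event state; req[2] clears RAE, which re-arms generation of the changed-NS notice (asserted below). */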
2454 cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0; 2455 2456 /* consume the pending event */ 2457 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link); 2458 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2459 event.raw = rsp[0].nvme_cpl.cdw0; 2460 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2461 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2462 2463 /* get log with RAE set */ 2464 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2465 CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2466 CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2467 2468 /* will not generate new event until RAE is clear */ 2469 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2470 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2471 2472 /* get log with RAE clear */ 2473 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2474 CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2475 CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2476 2477 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2478 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2479 2480 cleanup_pending_async_events(&ctrlr); 2481 } 2482 2483 static void 2484 test_nvmf_ctrlr_create_destruct(void) 2485 { 2486 struct spdk_nvmf_fabric_connect_data connect_data = {}; 2487 struct spdk_nvmf_poll_group group = {}; 2488 struct spdk_nvmf_subsystem_poll_group sgroups[2] = {}; 2489 struct spdk_nvmf_transport transport = {}; 2490 struct spdk_nvmf_transport_ops tops = {}; 2491 struct spdk_nvmf_subsystem subsystem = {}; 2492 struct spdk_nvmf_ns *ns_arr[1] = { NULL }; 2493 struct spdk_nvmf_request req = {}; 2494 struct spdk_nvmf_qpair qpair = {}; 2495 struct spdk_nvmf_ctrlr *ctrlr = NULL; 2496 struct spdk_nvmf_tgt tgt = {}; 2497 union nvmf_h2c_msg cmd = {}; 2498 union nvmf_c2h_msg rsp = {}; 2499 const uint8_t hostid[16] = { 2500 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2501 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 2502 }; 2503 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 2504 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 2505 2506 group.thread = spdk_get_thread(); 2507 transport.ops = &tops; 2508 transport.opts.max_aq_depth = 32; 2509 transport.opts.max_queue_depth = 64; 2510 transport.opts.max_qpairs_per_ctrlr = 3; 2511 transport.opts.dif_insert_or_strip = true; 2512 transport.tgt = &tgt; 2513 qpair.transport = &transport; 2514 qpair.group = &group; 2515 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 2516 TAILQ_INIT(&qpair.outstanding); 2517 2518 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 2519 connect_data.cntlid = 0xFFFF; 2520 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 2521 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 2522 2523 subsystem.thread = spdk_get_thread(); 2524 subsystem.id = 1; 2525 TAILQ_INIT(&subsystem.ctrlrs); 2526 subsystem.tgt = &tgt; 2527 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2528 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2529 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 2530 subsystem.ns = ns_arr; 2531 2532 group.sgroups = sgroups; 2533 2534 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 2535 cmd.connect_cmd.cid = 1; 2536 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 2537 cmd.connect_cmd.recfmt = 0; 2538 cmd.connect_cmd.qid = 0; 2539 cmd.connect_cmd.sqsize = 31; 2540 cmd.connect_cmd.cattr = 0; 2541 
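/* KATO is in milliseconds; nvmf_ctrlr_create() is expected to echo it back through the Keep Alive Timer feature, which is asserted once the controller exists. */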
cmd.connect_cmd.kato = 120000; 2542 2543 req.qpair = &qpair; 2544 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 2545 req.length = sizeof(connect_data); 2546 SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length); 2547 req.cmd = &cmd; 2548 req.rsp = &rsp; 2549 2550 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 2551 sgroups[subsystem.id].mgmt_io_outstanding++; 2552 2553 ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base); 2554 poll_threads(); 2555 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2556 CU_ASSERT(req.qpair->ctrlr == ctrlr); 2557 CU_ASSERT(ctrlr->subsys == &subsystem); 2558 CU_ASSERT(ctrlr->thread == req.qpair->group->thread); 2559 CU_ASSERT(ctrlr->disconnect_in_progress == false); 2560 CU_ASSERT(ctrlr->qpair_mask != NULL); 2561 CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000); 2562 CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1); 2563 CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1); 2564 CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1); 2565 CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1); 2566 CU_ASSERT(!strncmp((void *)&ctrlr->hostid, hostid, 16)); 2567 CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1); 2568 CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63); 2569 CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0); 2570 CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500); 2571 CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0); 2572 CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM); 2573 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0); 2574 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0); 2575 CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1); 2576 CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3); 2577 CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0); 2578 CU_ASSERT(ctrlr->vcprop.cc.raw == 0); 2579 CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0); 2580 CU_ASSERT(ctrlr->vcprop.csts.raw == 0); 2581 CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0); 2582 CU_ASSERT(ctrlr->dif_insert_or_strip == true); 2583 2584 ctrlr->in_destruct = true; 2585 nvmf_ctrlr_destruct(ctrlr); 2586 poll_threads(); 2587 CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs)); 2588 CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding)); 2589 } 2590 2591 static void 2592 test_nvmf_ctrlr_use_zcopy(void) 2593 { 2594 struct spdk_nvmf_subsystem subsystem = {}; 2595 struct spdk_nvmf_transport transport = {}; 2596 struct spdk_nvmf_request req = {}; 2597 struct spdk_nvmf_qpair qpair = {}; 2598 struct spdk_nvmf_ctrlr ctrlr = {}; 2599 union nvmf_h2c_msg cmd = {}; 2600 struct spdk_nvmf_ns ns = {}; 2601 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2602 struct spdk_bdev bdev = {}; 2603 struct spdk_nvmf_poll_group group = {}; 2604 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2605 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2606 struct spdk_io_channel io_ch = {}; 2607 int opc; 2608 2609 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2610 ns.bdev = &bdev; 2611 2612 subsystem.id = 0; 2613 subsystem.max_nsid = 1; 2614 subsys_ns[0] = &ns; 2615 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2616 2617 ctrlr.subsys = &subsystem; 2618 ctrlr.visible_ns = spdk_bit_array_create(1); 2619 spdk_bit_array_set(ctrlr.visible_ns, 0); 2620 2621 transport.opts.zcopy = true; 2622 2623 qpair.ctrlr = &ctrlr; 2624 qpair.group = &group; 2625 qpair.qid = 1; 2626 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2627 qpair.transport = &transport; 2628 2629 group.thread = spdk_get_thread(); 2630 group.num_sgroups = 1; 2631 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2632 sgroups.num_ns = 1; 2633 
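/* Minimal poll-group state: one active subsystem group with a single namespace entry and a valid I/O channel, which is what the zcopy request path checks before reaching the bdev layer. */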
ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2634 ns_info.channel = &io_ch; 2635 sgroups.ns_info = &ns_info; 2636 TAILQ_INIT(&sgroups.queued); 2637 group.sgroups = &sgroups; 2638 TAILQ_INIT(&qpair.outstanding); 2639 2640 req.qpair = &qpair; 2641 req.cmd = &cmd; 2642 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2643 2644 /* Admin queue */ 2645 qpair.qid = 0; 2646 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2647 qpair.qid = 1; 2648 2649 /* Invalid Opcodes */ 2650 for (opc = 0; opc <= 255; opc++) { 2651 cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc; 2652 if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) && 2653 (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) { 2654 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2655 } 2656 } 2657 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 2658 2659 /* Fused WRITE */ 2660 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2661 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2662 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE; 2663 2664 /* Non bdev */ 2665 cmd.nvme_cmd.nsid = 4; 2666 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2667 cmd.nvme_cmd.nsid = 1; 2668 2669 /* ZCOPY Not supported */ 2670 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2671 ns.zcopy = true; 2672 2673 /* ZCOPY disabled on transport level */ 2674 transport.opts.zcopy = false; 2675 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2676 transport.opts.zcopy = true; 2677 2678 /* Success */ 2679 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2680 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2681 2682 spdk_bit_array_free(&ctrlr.visible_ns); 2683 } 2684 2685 static void 2686 qpair_state_change_done(void *cb_arg, int status) 2687 { 2688 } 2689 2690 static void 2691 test_spdk_nvmf_request_zcopy_start(void) 2692 { 2693 struct spdk_nvmf_request req = {}; 2694 struct spdk_nvmf_qpair qpair = {}; 2695 struct spdk_nvmf_transport transport = {}; 2696 struct spdk_nvme_cmd cmd = {}; 2697 union nvmf_c2h_msg rsp = {}; 2698 struct spdk_nvmf_ctrlr ctrlr = {}; 2699 struct spdk_nvmf_subsystem subsystem = {}; 2700 struct spdk_nvmf_ns ns = {}; 2701 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2702 enum spdk_nvme_ana_state ana_state[1]; 2703 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2704 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2705 2706 struct spdk_nvmf_poll_group group = {}; 2707 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2708 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2709 struct spdk_io_channel io_ch = {}; 2710 2711 ns.bdev = &bdev; 2712 ns.zcopy = true; 2713 ns.anagrpid = 1; 2714 2715 subsystem.id = 0; 2716 subsystem.max_nsid = 1; 2717 subsys_ns[0] = &ns; 2718 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2719 2720 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2721 2722 /* Enable controller */ 2723 ctrlr.vcprop.cc.bits.en = 1; 2724 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2725 ctrlr.listener = &listener; 2726 ctrlr.visible_ns = spdk_bit_array_create(1); 2727 spdk_bit_array_set(ctrlr.visible_ns, 0); 2728 2729 transport.opts.zcopy = true; 2730 2731 group.thread = spdk_get_thread(); 2732 group.num_sgroups = 1; 2733 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2734 sgroups.num_ns = 1; 2735 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2736 ns_info.channel = &io_ch; 2737 sgroups.ns_info = &ns_info; 2738 TAILQ_INIT(&sgroups.queued); 2739 group.sgroups = &sgroups; 2740 TAILQ_INIT(&qpair.outstanding); 2741 2742 qpair.ctrlr = &ctrlr; 2743 qpair.group = &group; 2744 qpair.transport = &transport; 2745 qpair.qid = 1; 2746 qpair.state = 
SPDK_NVMF_QPAIR_ENABLED; 2747 2748 cmd.nsid = 1; 2749 2750 req.qpair = &qpair; 2751 req.cmd = (union nvmf_h2c_msg *)&cmd; 2752 req.rsp = &rsp; 2753 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2754 cmd.opc = SPDK_NVME_OPC_READ; 2755 2756 /* Fail because no controller */ 2757 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2758 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2759 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 2760 qpair.ctrlr = NULL; 2761 spdk_nvmf_request_zcopy_start(&req); 2762 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2763 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2764 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 2765 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2766 qpair.ctrlr = &ctrlr; 2767 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2768 2769 /* Fail because bad NSID */ 2770 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2771 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2772 cmd.nsid = 0; 2773 spdk_nvmf_request_zcopy_start(&req); 2774 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2775 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2776 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2777 cmd.nsid = 1; 2778 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2779 2780 /* Fail because bad Channel */ 2781 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2782 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2783 ns_info.channel = NULL; 2784 spdk_nvmf_request_zcopy_start(&req); 2785 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2786 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2787 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2788 ns_info.channel = &io_ch; 2789 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2790 2791 /* Queue the request because NSID is not active */ 2792 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2793 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2794 ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING; 2795 spdk_nvmf_request_zcopy_start(&req); 2796 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT); 2797 CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req); 2798 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2799 TAILQ_REMOVE(&sgroups.queued, &req, link); 2800 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2801 2802 /* Fail because QPair is not active */ 2803 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2804 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2805 qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING; 2806 qpair.state_cb = qpair_state_change_done; 2807 spdk_nvmf_request_zcopy_start(&req); 2808 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED); 2809 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2810 qpair.state_cb = NULL; 2811 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2812 2813 /* Fail because nvmf_bdev_ctrlr_zcopy_start fails */ 2814 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2815 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2816 cmd.cdw10 = bdev.blockcnt; /* SLBA: CDW10 and CDW11 */ 2817 cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */ 2818 req.length = (cmd.cdw12 + 1) * bdev.blocklen; 2819 spdk_nvmf_request_zcopy_start(&req); 2820 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2821 cmd.cdw10 = 0; 2822 cmd.cdw12 = 0; 2823 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2824 2825 /* Success */ 2826 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2827 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2828 spdk_nvmf_request_zcopy_start(&req); 2829 CU_ASSERT(req.zcopy_phase
== NVMF_ZCOPY_PHASE_EXECUTE); 2830 2831 spdk_bit_array_free(&ctrlr.visible_ns); 2832 } 2833 2834 static void 2835 test_zcopy_read(void) 2836 { 2837 struct spdk_nvmf_request req = {}; 2838 struct spdk_nvmf_qpair qpair = {}; 2839 struct spdk_nvmf_transport transport = {}; 2840 struct spdk_nvme_cmd cmd = {}; 2841 union nvmf_c2h_msg rsp = {}; 2842 struct spdk_nvmf_ctrlr ctrlr = {}; 2843 struct spdk_nvmf_subsystem subsystem = {}; 2844 struct spdk_nvmf_ns ns = {}; 2845 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2846 enum spdk_nvme_ana_state ana_state[1]; 2847 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2848 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2849 2850 struct spdk_nvmf_poll_group group = {}; 2851 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2852 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2853 struct spdk_io_channel io_ch = {}; 2854 2855 ns.bdev = &bdev; 2856 ns.zcopy = true; 2857 ns.anagrpid = 1; 2858 2859 subsystem.id = 0; 2860 subsystem.max_nsid = 1; 2861 subsys_ns[0] = &ns; 2862 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2863 2864 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2865 2866 /* Enable controller */ 2867 ctrlr.vcprop.cc.bits.en = 1; 2868 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2869 ctrlr.listener = &listener; 2870 ctrlr.visible_ns = spdk_bit_array_create(1); 2871 spdk_bit_array_set(ctrlr.visible_ns, 0); 2872 2873 transport.opts.zcopy = true; 2874 2875 group.thread = spdk_get_thread(); 2876 group.num_sgroups = 1; 2877 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2878 sgroups.num_ns = 1; 2879 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2880 ns_info.channel = &io_ch; 2881 sgroups.ns_info = &ns_info; 2882 TAILQ_INIT(&sgroups.queued); 2883 group.sgroups = &sgroups; 2884 TAILQ_INIT(&qpair.outstanding); 2885 2886 qpair.ctrlr = &ctrlr; 2887 qpair.group = &group; 2888 qpair.transport = &transport; 2889 qpair.qid = 1; 2890 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2891 2892 cmd.nsid = 1; 2893 2894 req.qpair = &qpair; 2895 req.cmd = (union nvmf_h2c_msg *)&cmd; 2896 req.rsp = &rsp; 2897 cmd.opc = SPDK_NVME_OPC_READ; 2898 2899 /* Prepare for zcopy */ 2900 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2901 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2902 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2903 CU_ASSERT(ns_info.io_outstanding == 0); 2904 2905 /* Perform the zcopy start */ 2906 spdk_nvmf_request_zcopy_start(&req); 2907 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2908 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read); 2909 CU_ASSERT(qpair.outstanding.tqh_first == &req); 2910 CU_ASSERT(ns_info.io_outstanding == 1); 2911 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2912 2913 /* Perform the zcopy end */ 2914 spdk_nvmf_request_zcopy_end(&req, false); 2915 CU_ASSERT(req.zcopy_bdev_io == NULL); 2916 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 2917 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2918 CU_ASSERT(ns_info.io_outstanding == 0); 2919 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2920 2921 spdk_bit_array_free(&ctrlr.visible_ns); 2922 } 2923 2924 static void 2925 test_zcopy_write(void) 2926 { 2927 struct spdk_nvmf_request req = {}; 2928 struct spdk_nvmf_qpair qpair = {}; 2929 struct spdk_nvmf_transport transport = {}; 2930 struct spdk_nvme_cmd cmd = {}; 2931 union nvmf_c2h_msg rsp = {}; 2932 struct spdk_nvmf_ctrlr ctrlr = {}; 2933 struct spdk_nvmf_subsystem subsystem = {}; 2934 struct spdk_nvmf_ns ns = {}; 2935 struct spdk_nvmf_ns 
*subsys_ns[1] = {}; 2936 enum spdk_nvme_ana_state ana_state[1]; 2937 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2938 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2939 2940 struct spdk_nvmf_poll_group group = {}; 2941 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2942 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2943 struct spdk_io_channel io_ch = {}; 2944 2945 ns.bdev = &bdev; 2946 ns.zcopy = true; 2947 ns.anagrpid = 1; 2948 2949 subsystem.id = 0; 2950 subsystem.max_nsid = 1; 2951 subsys_ns[0] = &ns; 2952 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2953 2954 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2955 2956 /* Enable controller */ 2957 ctrlr.vcprop.cc.bits.en = 1; 2958 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2959 ctrlr.listener = &listener; 2960 ctrlr.visible_ns = spdk_bit_array_create(1); 2961 spdk_bit_array_set(ctrlr.visible_ns, 0); 2962 2963 transport.opts.zcopy = true; 2964 2965 group.thread = spdk_get_thread(); 2966 group.num_sgroups = 1; 2967 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2968 sgroups.num_ns = 1; 2969 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2970 ns_info.channel = &io_ch; 2971 sgroups.ns_info = &ns_info; 2972 TAILQ_INIT(&sgroups.queued); 2973 group.sgroups = &sgroups; 2974 TAILQ_INIT(&qpair.outstanding); 2975 2976 qpair.ctrlr = &ctrlr; 2977 qpair.group = &group; 2978 qpair.transport = &transport; 2979 qpair.qid = 1; 2980 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2981 2982 cmd.nsid = 1; 2983 2984 req.qpair = &qpair; 2985 req.cmd = (union nvmf_h2c_msg *)&cmd; 2986 req.rsp = &rsp; 2987 cmd.opc = SPDK_NVME_OPC_WRITE; 2988 2989 /* Prepare for zcopy */ 2990 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2991 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2992 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2993 CU_ASSERT(ns_info.io_outstanding == 0); 2994 2995 /* Perform the zcopy start */ 2996 spdk_nvmf_request_zcopy_start(&req); 2997 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2998 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write); 2999 CU_ASSERT(qpair.outstanding.tqh_first == &req); 3000 CU_ASSERT(ns_info.io_outstanding == 1); 3001 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 3002 3003 /* Perform the zcopy end */ 3004 spdk_nvmf_request_zcopy_end(&req, true); 3005 CU_ASSERT(req.zcopy_bdev_io == NULL); 3006 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 3007 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 3008 CU_ASSERT(ns_info.io_outstanding == 0); 3009 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 3010 3011 spdk_bit_array_free(&ctrlr.visible_ns); 3012 } 3013 3014 static void 3015 test_nvmf_property_set(void) 3016 { 3017 int rc; 3018 struct spdk_nvmf_request req = {}; 3019 struct spdk_nvmf_qpair qpair = {}; 3020 struct spdk_nvmf_ctrlr ctrlr = {}; 3021 union nvmf_h2c_msg cmd = {}; 3022 union nvmf_c2h_msg rsp = {}; 3023 3024 req.qpair = &qpair; 3025 qpair.ctrlr = &ctrlr; 3026 req.cmd = &cmd; 3027 req.rsp = &rsp; 3028 3029 /* Invalid parameters */ 3030 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 3031 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs); 3032 3033 rc = nvmf_property_set(&req); 3034 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3035 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 3036 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 3037 3038 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms); 3039 3040 rc = 
nvmf_property_get(&req); 3041 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3042 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 3043 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 3044 3045 /* Set cc with same property size */ 3046 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 3047 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc); 3048 3049 rc = nvmf_property_set(&req); 3050 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3051 3052 /* Emulate cc data */ 3053 ctrlr.vcprop.cc.raw = 0xDEADBEEF; 3054 3055 rc = nvmf_property_get(&req); 3056 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3057 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF); 3058 3059 /* Set asq with different property size */ 3060 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 3061 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 3062 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq); 3063 3064 rc = nvmf_property_set(&req); 3065 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3066 3067 /* Emulate asq data */ 3068 ctrlr.vcprop.asq = 0xAADDADBEEF; 3069 3070 rc = nvmf_property_get(&req); 3071 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3072 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF); 3073 } 3074 3075 static void 3076 test_nvmf_ctrlr_get_features_host_behavior_support(void) 3077 { 3078 int rc; 3079 struct spdk_nvmf_request req = {}; 3080 struct spdk_nvmf_qpair qpair = {}; 3081 struct spdk_nvmf_ctrlr ctrlr = {}; 3082 struct spdk_nvme_host_behavior behavior = {}; 3083 union nvmf_h2c_msg cmd = {}; 3084 union nvmf_c2h_msg rsp = {}; 3085 3086 qpair.ctrlr = &ctrlr; 3087 req.qpair = &qpair; 3088 req.cmd = &cmd; 3089 req.rsp = &rsp; 3090 3091 /* Invalid data */ 3092 req.length = sizeof(struct spdk_nvme_host_behavior); 3093 req.iovcnt = 0; 3094 3095 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3096 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3097 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3098 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3099 3100 /* Wrong structure length */ 3101 req.length = sizeof(struct spdk_nvme_host_behavior) - 1; 3102 SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length); 3103 3104 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3105 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3106 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3107 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3108 3109 /* Get Features Host Behavior Support Success */ 3110 req.length = sizeof(struct spdk_nvme_host_behavior); 3111 SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length); 3112 3113 ctrlr.acre_enabled = true; 3114 ctrlr.lbafee_enabled = true; 3115 behavior.acre = false; 3116 behavior.lbafee = false; 3117 3118 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3119 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3120 CU_ASSERT(behavior.acre == true); 3121 CU_ASSERT(behavior.lbafee == true); 3122 } 3123 3124 static void 3125 test_nvmf_ctrlr_set_features_host_behavior_support(void) 3126 { 3127 int rc; 3128 struct spdk_nvmf_request req = {}; 3129 struct spdk_nvmf_qpair qpair = {}; 3130 struct spdk_nvmf_ctrlr ctrlr = {}; 3131 struct spdk_nvme_host_behavior host_behavior = {}; 3132 union nvmf_h2c_msg cmd = {}; 3133 union nvmf_c2h_msg rsp = {}; 3134 3135 qpair.ctrlr = &ctrlr; 3136 req.qpair = &qpair; 3137 req.cmd = &cmd; 3138 
static void
test_nvmf_ctrlr_set_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior host_behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.iov[0].iov_base = &host_behavior;
	req.iov[0].iov_len = sizeof(host_behavior);

	/* Invalid iovcnt */
	req.iovcnt = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Invalid iov_len */
	req.iovcnt = 1;
	req.iov[0].iov_len = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* acre is false but lbafee is true */
	host_behavior.acre = 0;
	host_behavior.lbafee = 1;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == false);
	CU_ASSERT(ctrlr.lbafee_enabled == true);

	/* acre is true but lbafee is false */
	host_behavior.acre = 1;
	host_behavior.lbafee = 0;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == true);
	CU_ASSERT(ctrlr.lbafee_enabled == false);

	/* Invalid acre */
	host_behavior.acre = 2;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Invalid lbafee - reset acre so the lbafee check is actually reached */
	host_behavior.acre = 0;
	host_behavior.lbafee = 3;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}
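
/*
 * Verifies per-host namespace visibility: with no host entry and no
 * always_visible flag nothing is visible; adding a host entry to a namespace
 * ("cold attach") makes it visible to that controller only after
 * nvmf_ctrlr_init_visible_ns(); setting always_visible exposes the namespace
 * to every controller that re-initializes its visibility map.
 */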
static void
test_nvmf_ctrlr_ns_attachment(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns1 = {
		.nsid = 1,
		.always_visible = false,
		.hosts = TAILQ_HEAD_INITIALIZER(ns1.hosts)
	};
	struct spdk_nvmf_ns ns3 = {
		.nsid = 3,
		.always_visible = false,
		.hosts = TAILQ_HEAD_INITIALIZER(ns3.hosts)
	};
	struct spdk_nvmf_ctrlr ctrlrA = {
		.subsys = &subsystem
	};
	struct spdk_nvmf_ctrlr ctrlrB = {
		.subsys = &subsystem
	};
	struct spdk_nvmf_host *host;
	uint32_t nsid;

	subsystem.max_nsid = 3;
	subsystem.ns = calloc(subsystem.max_nsid, sizeof(*subsystem.ns));
	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);

	/* nsid = 2 -> unallocated, nsid = 1,3 -> allocated */
	subsystem.ns[0] = &ns1;
	subsystem.ns[2] = &ns3;

	snprintf(ctrlrA.hostnqn, sizeof(ctrlrA.hostnqn), "nqn.2016-06.io.spdk:host1");
	ctrlrA.visible_ns = spdk_bit_array_create(subsystem.max_nsid);
	SPDK_CU_ASSERT_FATAL(ctrlrA.visible_ns != NULL);
	snprintf(ctrlrB.hostnqn, sizeof(ctrlrB.hostnqn), "nqn.2016-06.io.spdk:host2");
	ctrlrB.visible_ns = spdk_bit_array_create(subsystem.max_nsid);
	SPDK_CU_ASSERT_FATAL(ctrlrB.visible_ns != NULL);

	/* No auto attach and no cold attach of any ctrlr */
	nsid = 1;
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	nvmf_ctrlr_init_visible_ns(&ctrlrA);
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);

	/* Cold attach ctrlrA to namespace 1 */
	nsid = 1;
	host = calloc(1, sizeof(*host));
	SPDK_CU_ASSERT_FATAL(host != NULL);
	snprintf(host->nqn, sizeof(host->nqn), "%s", ctrlrA.hostnqn);
	TAILQ_INSERT_HEAD(&ns1.hosts, host, link);
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	nvmf_ctrlr_init_visible_ns(&ctrlrA);
	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);

	/* Detach ctrlrA from namespace 1 */
	nsid = 1;
	spdk_bit_array_clear(ctrlrA.visible_ns, nsid - 1);
	TAILQ_REMOVE(&ns1.hosts, host, link);
	free(host);

	/* Auto attach any ctrlr to namespace 1 */
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	ns1.always_visible = true;
	nvmf_ctrlr_init_visible_ns(&ctrlrA);
	nsid = 1;
	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);
	nvmf_ctrlr_init_visible_ns(&ctrlrB);
	nsid = 1;
	CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	nsid = 3;
	CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1));
	CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1));
	CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL);
	CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL);

	/* Bit arrays from spdk_bit_array_create() must go through spdk_bit_array_free() */
	spdk_bit_array_free(&ctrlrA.visible_ns);
	spdk_bit_array_free(&ctrlrB.visible_ns);
	free(subsystem.ns);
}
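
/*
 * Walks nvmf_check_qpair_active() through the qpair state machine: I/O is only
 * admitted on enabled qpairs, CONNECT is the sole command accepted while
 * connecting, AUTHENTICATION_SEND/RECV are the only commands accepted while
 * authenticating, and every other state rejects all commands.
 */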
static void
test_nvmf_check_qpair_active(void)
{
	union nvmf_c2h_msg rsp = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_nvmf_qpair qpair = { .outstanding = TAILQ_HEAD_INITIALIZER(qpair.outstanding) };
	struct spdk_nvmf_request req = { .qpair = &qpair, .cmd = &cmd, .rsp = &rsp };
	size_t i;

	/* qpair is active */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	qpair.state = SPDK_NVMF_QPAIR_ENABLED;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);

	/* qpair is connecting - CONNECT is allowed */
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);

	/* qpair is connecting - other commands are disallowed */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	qpair.state = SPDK_NVMF_QPAIR_CONNECTING;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);

	/* qpair is authenticating - AUTHENTICATION_SEND is allowed */
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND;
	qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);

	/* qpair is authenticating - AUTHENTICATION_RECV is allowed */
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV;
	qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true);

	/* qpair is authenticating - other commands are disallowed */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING;
	CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVMF_FABRIC_SC_AUTH_REQUIRED);

	/* qpair is in one of the other states - all commands are disallowed */
	int disallowed_states[] = {
		SPDK_NVMF_QPAIR_UNINITIALIZED,
		SPDK_NVMF_QPAIR_DEACTIVATING,
		SPDK_NVMF_QPAIR_ERROR,
	};
	qpair.state_cb = qpair_state_change_done;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	for (i = 0; i < SPDK_COUNTOF(disallowed_states); ++i) {
		qpair.state = disallowed_states[i];
		CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false);
		CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
		CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	}
}
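
/*
 * Test runner. allocate_threads()/set_thread() come from
 * common/lib/ut_multithread.c and give the tests a single SPDK thread context
 * to run on; spdk_ut_run_tests() drives the registered CUnit suite and
 * returns the failure count, which becomes the process exit status.
 */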
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);
	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);
	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
	CU_ADD_TEST(suite, test_multi_async_events);
	CU_ADD_TEST(suite, test_rae);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
	CU_ADD_TEST(suite, test_zcopy_read);
	CU_ADD_TEST(suite, test_zcopy_write);
	CU_ADD_TEST(suite, test_nvmf_property_set);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_ns_attachment);
	CU_ADD_TEST(suite, test_nvmf_check_qpair_active);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}