1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2016 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/bdev_zone.h" 8 #include "spdk/nvme_spec.h" 9 #include "spdk/stdinc.h" 10 11 #include "spdk_internal/cunit.h" 12 #include "spdk_internal/mock.h" 13 #include "thread/thread_internal.h" 14 15 #include "common/lib/ut_multithread.c" 16 #include "nvmf/ctrlr.c" 17 18 SPDK_LOG_REGISTER_COMPONENT(nvmf) 19 20 struct spdk_bdev { 21 int ut_mock; 22 uint64_t blockcnt; 23 uint32_t blocklen; 24 bool zoned; 25 uint32_t zone_size; 26 uint32_t max_open_zones; 27 uint32_t max_active_zones; 28 enum spdk_dif_type dif_type; 29 }; 30 31 #define MAX_OPEN_ZONES 12 32 #define MAX_ACTIVE_ZONES 34 33 #define ZONE_SIZE 56 34 35 const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn"; 36 const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn"; 37 38 static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL; 39 static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *) 40 0x8877665544332211UL; 41 static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL; 42 43 DEFINE_STUB(spdk_nvmf_tgt_find_subsystem, 44 struct spdk_nvmf_subsystem *, 45 (struct spdk_nvmf_tgt *tgt, const char *subnqn), 46 NULL); 47 48 DEFINE_STUB(spdk_nvmf_poll_group_create, 49 struct spdk_nvmf_poll_group *, 50 (struct spdk_nvmf_tgt *tgt), 51 NULL); 52 53 DEFINE_STUB(spdk_nvmf_subsystem_get_sn, 54 const char *, 55 (const struct spdk_nvmf_subsystem *subsystem), 56 subsystem_default_sn); 57 58 DEFINE_STUB(spdk_nvmf_subsystem_get_mn, 59 const char *, 60 (const struct spdk_nvmf_subsystem *subsystem), 61 subsystem_default_mn); 62 63 DEFINE_STUB(spdk_nvmf_subsystem_host_allowed, 64 bool, 65 (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn), 66 true); 67 68 DEFINE_STUB(nvmf_subsystem_add_ctrlr, 69 int, 70 (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr), 71 0); 72 73 DEFINE_STUB(nvmf_subsystem_get_ctrlr, 74 struct spdk_nvmf_ctrlr *, 75 (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), 76 NULL); 77 DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool, 78 (struct spdk_nvmf_subsystem *subsystem), false); 79 DEFINE_STUB(nvmf_ctrlr_dsm_supported, 80 bool, 81 (struct spdk_nvmf_ctrlr *ctrlr), 82 false); 83 84 DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported, 85 bool, 86 (struct spdk_nvmf_ctrlr *ctrlr), 87 false); 88 89 DEFINE_STUB(nvmf_ctrlr_copy_supported, 90 bool, 91 (struct spdk_nvmf_ctrlr *ctrlr), 92 false); 93 94 DEFINE_STUB_V(nvmf_get_discovery_log_page, 95 (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov, 96 uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid)); 97 98 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, 99 int, 100 (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 101 0); 102 103 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed, 104 bool, 105 (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid), 106 true); 107 108 DEFINE_STUB(nvmf_subsystem_find_listener, 109 struct spdk_nvmf_subsystem_listener *, 110 (struct spdk_nvmf_subsystem *subsystem, 111 const struct spdk_nvme_transport_id *trid), 112 (void *)0x1); 113 114 DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd, 115 int, 116 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 117 struct spdk_nvmf_request *req), 118 0); 119 120 DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd, 121 int, 122 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 123 struct spdk_nvmf_request *req), 124 0); 125 126 DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd, 127 int, 128 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 129 struct spdk_nvmf_request *req), 130 0); 131 132 DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd, 133 int, 134 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 135 struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req), 136 0); 137 138 DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd, 139 int, 140 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 141 struct spdk_nvmf_request *req), 142 0); 143 144 DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd, 145 int, 146 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 147 struct spdk_nvmf_request *req), 148 0); 149 150 DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd, 151 int, 152 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 153 struct spdk_nvmf_request *req), 154 0); 155 156 DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd, 157 int, 158 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 159 struct spdk_nvmf_request *req), 160 0); 161 162 DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io, 163 int, 164 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 165 struct spdk_nvmf_request *req), 166 0); 167 168 DEFINE_STUB(nvmf_transport_req_complete, 169 int, 170 (struct spdk_nvmf_request *req), 171 0); 172 173 DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx)); 174 175 DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool, 176 (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, 177 struct spdk_dif_ctx *dif_ctx), 178 true); 179 180 DEFINE_STUB_V(nvmf_transport_qpair_abort_request, 181 (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req)); 182 183 DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd)); 184 DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl)); 185 186 DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem, 187 struct spdk_nvmf_ctrlr *ctrlr)); 188 189 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd, 190 int, 191 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 192 struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort), 193 0); 194 195 DEFINE_STUB(nvmf_transport_req_free, 196 int, 197 (struct spdk_nvmf_request *req), 198 0); 199 200 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin, 201 int, 202 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 203 struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn), 204 0); 205 DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 206 spdk_bdev_io_completion_cb cb, void *cb_arg), 0); 207 DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io)); 208 209 DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev), 210 MAX_ACTIVE_ZONES); 211 DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES); 212 DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE); 213 DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false); 214 215 DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t, 216 (const struct spdk_nvme_ns_data *nsdata), 0); 217 218 DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false); 219 DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n), 220 false); 221 DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0); 222 DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r), 223 SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 224 225 DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *, 226 (const struct spdk_nvmf_subsystem *subsystem), NULL); 227 228 DEFINE_STUB(spdk_bdev_io_type_supported, bool, 229 (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false); 230 231 void 232 nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, enum spdk_nvmf_qpair_state state) 233 { 234 qpair->state = state; 235 } 236 237 int 238 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair) 239 { 240 return 0; 241 } 242 243 void 244 nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata, 245 bool dif_insert_or_strip) 246 { 247 uint64_t num_blocks; 248 249 SPDK_CU_ASSERT_FATAL(ns->bdev != NULL); 250 num_blocks = ns->bdev->blockcnt; 251 nsdata->nsze = num_blocks; 252 nsdata->ncap = num_blocks; 253 nsdata->nuse = num_blocks; 254 nsdata->nlbaf = 0; 255 nsdata->flbas.format = 0; 256 nsdata->flbas.msb_format = 0; 257 nsdata->lbaf[0].lbads = spdk_u32log2(512); 258 } 259 260 void 261 nvmf_bdev_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ns *ns, 262 struct spdk_nvme_nvm_ns_data *nsdata_nvm) 263 { 264 if (ns->bdev->dif_type == SPDK_DIF_DISABLE) { 265 return; 266 } 267 268 nsdata_nvm->lbstm = 0; 269 nsdata_nvm->pic._16bpists = 0; 270 nsdata_nvm->pic._16bpistm = 1; 271 nsdata_nvm->pic.stcrs = 0; 272 nsdata_nvm->elbaf[0].sts = 16; 273 nsdata_nvm->elbaf[0].pif = SPDK_DIF_PI_FORMAT_32; 274 } 275 276 struct spdk_nvmf_ns * 277 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem) 278 { 279 SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL); 280 return subsystem->ns[0]; 281 } 282 283 struct spdk_nvmf_ns * 284 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, 285 struct spdk_nvmf_ns *prev_ns) 286 { 287 uint32_t nsid; 288 289 SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL); 290 nsid = prev_ns->nsid; 291 292 if (nsid >= subsystem->max_nsid) { 293 return NULL; 294 } 295 for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) { 296 if (subsystem->ns[nsid - 1]) { 297 return subsystem->ns[nsid - 1]; 298 } 299 } 300 return NULL; 301 } 302 303 bool 304 nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev) 305 { 306 return true; 307 } 308 309 int 310 nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev, 311 struct spdk_bdev_desc *desc, 312 struct spdk_io_channel *ch, 313 struct spdk_nvmf_request *req) 314 { 315 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 316 uint64_t start_lba; 317 uint64_t num_blocks; 318 319 start_lba = from_le64(&req->cmd->nvme_cmd.cdw10); 320 num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1; 321 322 if ((start_lba + num_blocks) > bdev->blockcnt) { 323 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 324 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 325 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 326 } 327 328 if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) { 329 req->zcopy_bdev_io = zcopy_start_bdev_io_write; 330 } else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) { 331 req->zcopy_bdev_io = zcopy_start_bdev_io_read; 332 } else { 333 req->zcopy_bdev_io = zcopy_start_bdev_io_fail; 334 } 335 336 337 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 338 } 339 340 void 341 nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit) 342 { 343 req->zcopy_bdev_io = NULL; 344 spdk_nvmf_request_complete(req); 345 } 346 347 bool 348 nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns) 349 { 350 return ns->ptpl_file != NULL; 351 } 352 353 static void 354 test_get_log_page(void) 355 { 356 struct spdk_nvmf_subsystem subsystem = {}; 357 struct spdk_nvmf_request req = {}; 358 struct spdk_nvmf_qpair qpair = {}; 359 struct spdk_nvmf_ctrlr ctrlr = {}; 360 union nvmf_h2c_msg cmd = {}; 361 union nvmf_c2h_msg rsp = {}; 362 char data[4096]; 363 364 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 365 366 ctrlr.subsys = &subsystem; 367 368 qpair.ctrlr = &ctrlr; 369 370 req.qpair = &qpair; 371 req.cmd = &cmd; 372 req.rsp = &rsp; 373 req.length = sizeof(data); 374 SPDK_IOV_ONE(req.iov, &req.iovcnt, &data, req.length); 375 376 /* Get Log Page - all valid */ 377 memset(&cmd, 0, sizeof(cmd)); 378 memset(&rsp, 0, sizeof(rsp)); 379 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 380 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 381 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 382 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 383 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 384 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 385 386 /* Get Log Page with invalid log ID */ 387 memset(&cmd, 0, sizeof(cmd)); 388 memset(&rsp, 0, sizeof(rsp)); 389 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 390 cmd.nvme_cmd.cdw10 = 0; 391 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 392 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 393 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 394 395 /* Get Log Page with invalid offset (not dword aligned) */ 396 memset(&cmd, 0, sizeof(cmd)); 397 memset(&rsp, 0, sizeof(rsp)); 398 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 399 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 400 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 401 cmd.nvme_cmd.cdw12 = 2; 402 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 403 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 404 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 405 406 /* Get Log Page without data buffer */ 407 memset(&cmd, 0, sizeof(cmd)); 408 memset(&rsp, 0, sizeof(rsp)); 409 req.iovcnt = 0; 410 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 411 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 412 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 413 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 414 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 415 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 416 } 417 418 static void 419 test_process_fabrics_cmd(void) 420 { 421 struct spdk_nvmf_request req = {}; 422 bool ret; 423 struct spdk_nvmf_qpair req_qpair = {}; 424 union nvmf_h2c_msg req_cmd = {}; 425 union nvmf_c2h_msg req_rsp = {}; 426 427 TAILQ_INIT(&req_qpair.outstanding); 428 req_qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 429 req.qpair = &req_qpair; 430 req.cmd = &req_cmd; 431 req.rsp = &req_rsp; 432 req.qpair->ctrlr = NULL; 433 434 /* No ctrlr and invalid command check */ 435 req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET; 436 ret = nvmf_check_qpair_active(&req); 437 CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 438 CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 439 CU_ASSERT(ret == false); 440 } 441 442 static bool 443 nvme_status_success(const struct spdk_nvme_status *status) 444 { 445 return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS; 446 } 447 448 static void 449 test_connect(void) 450 { 451 struct spdk_nvmf_fabric_connect_data connect_data; 452 struct spdk_nvmf_poll_group group; 453 struct spdk_nvmf_subsystem_poll_group *sgroups; 454 struct spdk_nvmf_transport transport; 455 struct spdk_nvmf_transport_ops tops = {}; 456 struct spdk_nvmf_subsystem subsystem; 457 struct spdk_nvmf_ns *ns_arr[1] = { NULL }; 458 struct spdk_nvmf_request req; 459 struct spdk_nvmf_qpair admin_qpair; 460 struct spdk_nvmf_qpair qpair; 461 struct spdk_nvmf_ctrlr ctrlr; 462 struct spdk_nvmf_tgt tgt; 463 union nvmf_h2c_msg cmd; 464 union nvmf_c2h_msg rsp; 465 const uint8_t hostid[16] = { 466 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 467 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 468 }; 469 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 470 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 471 int rc; 472 473 memset(&group, 0, sizeof(group)); 474 group.thread = spdk_get_thread(); 475 476 memset(&ctrlr, 0, sizeof(ctrlr)); 477 ctrlr.subsys = &subsystem; 478 ctrlr.qpair_mask = spdk_bit_array_create(3); 479 SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL); 480 ctrlr.vcprop.cc.bits.en = 1; 481 ctrlr.vcprop.cc.bits.iosqes = 6; 482 ctrlr.vcprop.cc.bits.iocqes = 4; 483 484 memset(&admin_qpair, 0, sizeof(admin_qpair)); 485 admin_qpair.group = &group; 486 admin_qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 487 488 memset(&tgt, 0, sizeof(tgt)); 489 memset(&transport, 0, sizeof(transport)); 490 transport.ops = &tops; 491 transport.opts.max_aq_depth = 32; 492 transport.opts.max_queue_depth = 64; 493 transport.opts.max_qpairs_per_ctrlr = 3; 494 transport.tgt = &tgt; 495 496 memset(&qpair, 0, sizeof(qpair)); 497 qpair.transport = &transport; 498 qpair.group = &group; 499 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 500 TAILQ_INIT(&qpair.outstanding); 501 502 memset(&connect_data, 0, sizeof(connect_data)); 503 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 504 connect_data.cntlid = 0xFFFF; 505 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 506 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 507 508 memset(&subsystem, 0, sizeof(subsystem)); 509 subsystem.thread = spdk_get_thread(); 510 subsystem.id = 1; 511 TAILQ_INIT(&subsystem.ctrlrs); 512 subsystem.tgt = &tgt; 513 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 514 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 515 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 516 subsystem.ns = ns_arr; 517 subsystem.max_nsid = 1; 518 519 sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group)); 520 group.sgroups = sgroups; 521 522 memset(&cmd, 0, sizeof(cmd)); 523 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 524 cmd.connect_cmd.cid = 1; 525 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 526 cmd.connect_cmd.recfmt = 0; 527 cmd.connect_cmd.qid = 0; 528 cmd.connect_cmd.sqsize = 31; 529 cmd.connect_cmd.cattr = 0; 530 cmd.connect_cmd.kato = 120000; 531 532 memset(&req, 0, sizeof(req)); 533 req.qpair = &qpair; 534 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 535 req.length = sizeof(connect_data); 536 SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length); 537 req.cmd = &cmd; 538 req.rsp = &rsp; 539 540 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 541 MOCK_SET(spdk_nvmf_poll_group_create, &group); 542 543 /* Valid admin connect command */ 544 memset(&rsp, 0, sizeof(rsp)); 545 sgroups[subsystem.id].mgmt_io_outstanding++; 546 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 547 rc = nvmf_ctrlr_cmd_connect(&req); 548 poll_threads(); 549 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 550 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 551 CU_ASSERT(qpair.ctrlr != NULL); 552 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 553 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 554 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 555 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 556 free(qpair.ctrlr->visible_ns); 557 free(qpair.ctrlr); 558 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 559 qpair.ctrlr = NULL; 560 561 /* Valid admin connect command with kato = 0 */ 562 cmd.connect_cmd.kato = 0; 563 memset(&rsp, 0, sizeof(rsp)); 564 sgroups[subsystem.id].mgmt_io_outstanding++; 565 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 566 rc = nvmf_ctrlr_cmd_connect(&req); 567 poll_threads(); 568 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 569 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 570 CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL); 571 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 572 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 573 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 574 free(qpair.ctrlr->visible_ns); 575 free(qpair.ctrlr); 576 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 577 qpair.ctrlr = NULL; 578 cmd.connect_cmd.kato = 120000; 579 580 /* Invalid data length */ 581 memset(&rsp, 0, sizeof(rsp)); 582 req.length = sizeof(connect_data) - 1; 583 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 584 rc = nvmf_ctrlr_cmd_connect(&req); 585 poll_threads(); 586 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 587 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 588 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 589 CU_ASSERT(qpair.ctrlr == NULL); 590 req.length = sizeof(connect_data); 591 592 /* Invalid recfmt */ 593 memset(&rsp, 0, sizeof(rsp)); 594 cmd.connect_cmd.recfmt = 1234; 595 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 596 rc = nvmf_ctrlr_cmd_connect(&req); 597 poll_threads(); 598 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 599 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 600 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT); 601 CU_ASSERT(qpair.ctrlr == NULL); 602 cmd.connect_cmd.recfmt = 0; 603 604 /* Subsystem not found */ 605 memset(&rsp, 0, sizeof(rsp)); 606 MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL); 607 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 608 rc = nvmf_ctrlr_cmd_connect(&req); 609 poll_threads(); 610 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 611 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 612 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 613 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 614 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256); 615 CU_ASSERT(qpair.ctrlr == NULL); 616 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 617 618 /* Unterminated hostnqn */ 619 memset(&rsp, 0, sizeof(rsp)); 620 memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn)); 621 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 622 rc = nvmf_ctrlr_cmd_connect(&req); 623 poll_threads(); 624 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 625 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 626 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 627 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 628 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512); 629 CU_ASSERT(qpair.ctrlr == NULL); 630 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 631 632 /* Host not allowed */ 633 memset(&rsp, 0, sizeof(rsp)); 634 MOCK_SET(spdk_nvmf_subsystem_host_allowed, false); 635 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 636 rc = nvmf_ctrlr_cmd_connect(&req); 637 poll_threads(); 638 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 639 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 640 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST); 641 CU_ASSERT(qpair.ctrlr == NULL); 642 MOCK_SET(spdk_nvmf_subsystem_host_allowed, true); 643 644 /* Invalid sqsize == 0 */ 645 memset(&rsp, 0, sizeof(rsp)); 646 cmd.connect_cmd.sqsize = 0; 647 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 648 rc = nvmf_ctrlr_cmd_connect(&req); 649 poll_threads(); 650 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 651 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 652 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 653 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 654 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 655 CU_ASSERT(qpair.ctrlr == NULL); 656 cmd.connect_cmd.sqsize = 31; 657 658 /* Invalid admin sqsize > max_aq_depth */ 659 memset(&rsp, 0, sizeof(rsp)); 660 cmd.connect_cmd.sqsize = 32; 661 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 662 rc = nvmf_ctrlr_cmd_connect(&req); 663 poll_threads(); 664 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 665 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 666 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 667 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 668 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 669 CU_ASSERT(qpair.ctrlr == NULL); 670 cmd.connect_cmd.sqsize = 31; 671 672 /* Invalid I/O sqsize > max_queue_depth */ 673 memset(&rsp, 0, sizeof(rsp)); 674 cmd.connect_cmd.qid = 1; 675 cmd.connect_cmd.sqsize = 64; 676 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 677 rc = nvmf_ctrlr_cmd_connect(&req); 678 poll_threads(); 679 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 680 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 681 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 682 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 683 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 684 CU_ASSERT(qpair.ctrlr == NULL); 685 cmd.connect_cmd.qid = 0; 686 cmd.connect_cmd.sqsize = 31; 687 688 /* Invalid cntlid for admin queue */ 689 memset(&rsp, 0, sizeof(rsp)); 690 connect_data.cntlid = 0x1234; 691 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 692 rc = nvmf_ctrlr_cmd_connect(&req); 693 poll_threads(); 694 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 695 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 696 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 697 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 698 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 699 CU_ASSERT(qpair.ctrlr == NULL); 700 connect_data.cntlid = 0xFFFF; 701 702 ctrlr.admin_qpair = &admin_qpair; 703 ctrlr.subsys = &subsystem; 704 705 /* Valid I/O queue connect command */ 706 memset(&rsp, 0, sizeof(rsp)); 707 MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr); 708 cmd.connect_cmd.qid = 1; 709 cmd.connect_cmd.sqsize = 63; 710 sgroups[subsystem.id].mgmt_io_outstanding++; 711 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 712 rc = nvmf_ctrlr_cmd_connect(&req); 713 poll_threads(); 714 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 715 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 716 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 717 CU_ASSERT(qpair.ctrlr == &ctrlr); 718 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 719 qpair.ctrlr = NULL; 720 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 721 cmd.connect_cmd.sqsize = 31; 722 723 /* Non-existent controller */ 724 memset(&rsp, 0, sizeof(rsp)); 725 MOCK_SET(nvmf_subsystem_get_ctrlr, NULL); 726 sgroups[subsystem.id].mgmt_io_outstanding++; 727 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 728 rc = nvmf_ctrlr_cmd_connect(&req); 729 poll_threads(); 730 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 731 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 732 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 733 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 734 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 735 CU_ASSERT(qpair.ctrlr == NULL); 736 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 737 MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr); 738 739 /* I/O connect to discovery controller */ 740 memset(&rsp, 0, sizeof(rsp)); 741 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT; 742 MOCK_SET(spdk_nvmf_subsystem_is_discovery, true); 743 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 744 sgroups[subsystem.id].mgmt_io_outstanding++; 745 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 746 rc = nvmf_ctrlr_cmd_connect(&req); 747 poll_threads(); 748 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 749 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 750 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 751 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 752 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 753 CU_ASSERT(qpair.ctrlr == NULL); 754 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 755 756 /* I/O connect to discovery controller with keep-alive-timeout != 0 */ 757 cmd.connect_cmd.qid = 0; 758 cmd.connect_cmd.kato = 120000; 759 memset(&rsp, 0, sizeof(rsp)); 760 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT; 761 MOCK_SET(spdk_nvmf_subsystem_is_discovery, true); 762 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 763 sgroups[subsystem.id].mgmt_io_outstanding++; 764 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 765 rc = nvmf_ctrlr_cmd_connect(&req); 766 poll_threads(); 767 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 768 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 769 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 770 CU_ASSERT(qpair.ctrlr != NULL); 771 CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL); 772 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 773 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 774 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 775 free(qpair.ctrlr->visible_ns); 776 free(qpair.ctrlr); 777 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 778 qpair.ctrlr = NULL; 779 780 /* I/O connect to discovery controller with keep-alive-timeout == 0. 781 * Then, a fixed timeout value is set to keep-alive-timeout. 782 */ 783 cmd.connect_cmd.kato = 0; 784 memset(&rsp, 0, sizeof(rsp)); 785 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT; 786 MOCK_SET(spdk_nvmf_subsystem_is_discovery, true); 787 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 788 sgroups[subsystem.id].mgmt_io_outstanding++; 789 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 790 rc = nvmf_ctrlr_cmd_connect(&req); 791 poll_threads(); 792 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 793 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 794 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 795 CU_ASSERT(qpair.ctrlr != NULL); 796 CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL); 797 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 798 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 799 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 800 free(qpair.ctrlr->visible_ns); 801 free(qpair.ctrlr); 802 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 803 qpair.ctrlr = NULL; 804 cmd.connect_cmd.qid = 1; 805 cmd.connect_cmd.kato = 120000; 806 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 807 MOCK_SET(spdk_nvmf_subsystem_is_discovery, false); 808 809 /* I/O connect to disabled controller */ 810 memset(&rsp, 0, sizeof(rsp)); 811 ctrlr.vcprop.cc.bits.en = 0; 812 sgroups[subsystem.id].mgmt_io_outstanding++; 813 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 814 rc = nvmf_ctrlr_cmd_connect(&req); 815 poll_threads(); 816 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 817 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 818 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 819 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 820 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 821 CU_ASSERT(qpair.ctrlr == NULL); 822 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 823 ctrlr.vcprop.cc.bits.en = 1; 824 825 /* I/O connect with invalid IOSQES */ 826 memset(&rsp, 0, sizeof(rsp)); 827 ctrlr.vcprop.cc.bits.iosqes = 3; 828 sgroups[subsystem.id].mgmt_io_outstanding++; 829 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 830 rc = nvmf_ctrlr_cmd_connect(&req); 831 poll_threads(); 832 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 833 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 834 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 835 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 836 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 837 CU_ASSERT(qpair.ctrlr == NULL); 838 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 839 ctrlr.vcprop.cc.bits.iosqes = 6; 840 841 /* I/O connect with invalid IOCQES */ 842 memset(&rsp, 0, sizeof(rsp)); 843 ctrlr.vcprop.cc.bits.iocqes = 3; 844 sgroups[subsystem.id].mgmt_io_outstanding++; 845 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 846 rc = nvmf_ctrlr_cmd_connect(&req); 847 poll_threads(); 848 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 849 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 850 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 851 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 852 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 853 CU_ASSERT(qpair.ctrlr == NULL); 854 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 855 ctrlr.vcprop.cc.bits.iocqes = 4; 856 857 /* I/O connect with qid that is too large */ 858 memset(&rsp, 0, sizeof(rsp)); 859 cmd.connect_cmd.qid = 3; 860 sgroups[subsystem.id].mgmt_io_outstanding++; 861 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 862 rc = nvmf_ctrlr_cmd_connect(&req); 863 poll_threads(); 864 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 865 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 866 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 867 CU_ASSERT(qpair.ctrlr == NULL); 868 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 869 870 /* I/O connect with duplicate queue ID */ 871 memset(&rsp, 0, sizeof(rsp)); 872 spdk_bit_array_set(ctrlr.qpair_mask, 1); 873 cmd.connect_cmd.qid = 1; 874 sgroups[subsystem.id].mgmt_io_outstanding++; 875 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 876 rc = nvmf_ctrlr_cmd_connect(&req); 877 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 878 poll_threads(); 879 /* First time, it will detect duplicate QID and schedule a retry. So for 880 * now we should expect the response to still be all zeroes. 881 */ 882 CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp))); 883 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1); 884 885 /* Now advance the clock, so that the retry poller executes. */ 886 spdk_delay_us(DUPLICATE_QID_RETRY_US * 2); 887 poll_threads(); 888 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 889 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 890 CU_ASSERT(qpair.ctrlr == NULL); 891 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 892 893 /* I/O connect with temporarily duplicate queue ID. This covers race 894 * where qpair_mask bit may not yet be cleared, even though initiator 895 * has closed the connection. See issue #2955. */ 896 memset(&rsp, 0, sizeof(rsp)); 897 sgroups[subsystem.id].mgmt_io_outstanding++; 898 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 899 rc = nvmf_ctrlr_cmd_connect(&req); 900 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 901 poll_threads(); 902 /* First time, it will detect duplicate QID and schedule a retry. So for 903 * now we should expect the response to still be all zeroes. 904 */ 905 CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp))); 906 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1); 907 908 /* Now advance the clock, so that the retry poller executes. */ 909 spdk_bit_array_clear(ctrlr.qpair_mask, 1); 910 spdk_delay_us(DUPLICATE_QID_RETRY_US * 2); 911 poll_threads(); 912 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 913 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 914 CU_ASSERT(qpair.ctrlr == &ctrlr); 915 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 916 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 917 qpair.ctrlr = NULL; 918 919 /* I/O connect when admin qpair is being destroyed */ 920 admin_qpair.group = NULL; 921 admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING; 922 memset(&rsp, 0, sizeof(rsp)); 923 sgroups[subsystem.id].mgmt_io_outstanding++; 924 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 925 rc = nvmf_ctrlr_cmd_connect(&req); 926 poll_threads(); 927 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 928 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 929 CU_ASSERT(qpair.ctrlr == NULL); 930 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 931 admin_qpair.group = &group; 932 admin_qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 933 934 /* I/O connect when admin qpair was destroyed */ 935 ctrlr.admin_qpair = NULL; 936 memset(&rsp, 0, sizeof(rsp)); 937 sgroups[subsystem.id].mgmt_io_outstanding++; 938 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 939 rc = nvmf_ctrlr_cmd_connect(&req); 940 poll_threads(); 941 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 942 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 943 CU_ASSERT(qpair.ctrlr == NULL); 944 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 945 ctrlr.admin_qpair = &admin_qpair; 946 947 /* Clean up globals */ 948 MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem); 949 MOCK_CLEAR(spdk_nvmf_poll_group_create); 950 951 spdk_bit_array_free(&ctrlr.qpair_mask); 952 free(sgroups); 953 } 954 955 static void 956 test_get_ns_id_desc_list(void) 957 { 958 struct spdk_nvmf_subsystem subsystem; 959 struct spdk_nvmf_qpair qpair; 960 struct spdk_nvmf_ctrlr ctrlr; 961 struct spdk_nvmf_request req; 962 struct spdk_nvmf_ns *ns_ptrs[1]; 963 struct spdk_nvmf_ns ns; 964 union nvmf_h2c_msg cmd; 965 union nvmf_c2h_msg rsp; 966 struct spdk_bdev bdev; 967 uint8_t buf[4096]; 968 969 memset(&subsystem, 0, sizeof(subsystem)); 970 ns_ptrs[0] = &ns; 971 subsystem.ns = ns_ptrs; 972 subsystem.max_nsid = 1; 973 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 974 975 memset(&ns, 0, sizeof(ns)); 976 ns.opts.nsid = 1; 977 ns.bdev = &bdev; 978 979 memset(&qpair, 0, sizeof(qpair)); 980 qpair.ctrlr = &ctrlr; 981 982 memset(&ctrlr, 0, sizeof(ctrlr)); 983 ctrlr.subsys = &subsystem; 984 ctrlr.vcprop.cc.bits.en = 1; 985 ctrlr.thread = spdk_get_thread(); 986 ctrlr.visible_ns = spdk_bit_array_create(1); 987 988 memset(&req, 0, sizeof(req)); 989 req.qpair = &qpair; 990 req.cmd = &cmd; 991 req.rsp = &rsp; 992 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 993 req.length = sizeof(buf); 994 SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length); 995 996 memset(&cmd, 0, sizeof(cmd)); 997 cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY; 998 cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST; 999 1000 /* Invalid NSID */ 1001 cmd.nvme_cmd.nsid = 0; 1002 memset(&rsp, 0, sizeof(rsp)); 1003 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1004 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1005 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1006 1007 /* Valid NSID, but ns is inactive */ 1008 spdk_bit_array_clear(ctrlr.visible_ns, 0); 1009 cmd.nvme_cmd.nsid = 1; 1010 memset(&rsp, 0, sizeof(rsp)); 1011 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1012 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1013 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1014 1015 /* Valid NSID, but ns has no IDs defined */ 1016 spdk_bit_array_set(ctrlr.visible_ns, 0); 1017 cmd.nvme_cmd.nsid = 1; 1018 memset(&rsp, 0, sizeof(rsp)); 1019 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1020 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1021 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1022 CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf))); 1023 1024 /* Valid NSID, only EUI64 defined */ 1025 ns.opts.eui64[0] = 0x11; 1026 ns.opts.eui64[7] = 0xFF; 1027 memset(&rsp, 0, sizeof(rsp)); 1028 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1029 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1030 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1031 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 1032 CU_ASSERT(buf[1] == 8); 1033 CU_ASSERT(buf[4] == 0x11); 1034 CU_ASSERT(buf[11] == 0xFF); 1035 CU_ASSERT(buf[13] == 0); 1036 1037 /* Valid NSID, only NGUID defined */ 1038 memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64)); 1039 ns.opts.nguid[0] = 0x22; 1040 ns.opts.nguid[15] = 0xEE; 1041 memset(&rsp, 0, sizeof(rsp)); 1042 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1043 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1044 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1045 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID); 1046 CU_ASSERT(buf[1] == 16); 1047 CU_ASSERT(buf[4] == 0x22); 1048 CU_ASSERT(buf[19] == 0xEE); 1049 CU_ASSERT(buf[21] == 0); 1050 1051 /* Valid NSID, both EUI64 and NGUID defined */ 1052 ns.opts.eui64[0] = 0x11; 1053 ns.opts.eui64[7] = 0xFF; 1054 ns.opts.nguid[0] = 0x22; 1055 ns.opts.nguid[15] = 0xEE; 1056 memset(&rsp, 0, sizeof(rsp)); 1057 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1058 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1059 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1060 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 1061 CU_ASSERT(buf[1] == 8); 1062 CU_ASSERT(buf[4] == 0x11); 1063 CU_ASSERT(buf[11] == 0xFF); 1064 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 1065 CU_ASSERT(buf[13] == 16); 1066 CU_ASSERT(buf[16] == 0x22); 1067 CU_ASSERT(buf[31] == 0xEE); 1068 CU_ASSERT(buf[33] == 0); 1069 1070 /* Valid NSID, EUI64, NGUID, and UUID defined */ 1071 ns.opts.eui64[0] = 0x11; 1072 ns.opts.eui64[7] = 0xFF; 1073 ns.opts.nguid[0] = 0x22; 1074 ns.opts.nguid[15] = 0xEE; 1075 ns.opts.uuid.u.raw[0] = 0x33; 1076 ns.opts.uuid.u.raw[15] = 0xDD; 1077 memset(&rsp, 0, sizeof(rsp)); 1078 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1079 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1080 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1081 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 1082 CU_ASSERT(buf[1] == 8); 1083 CU_ASSERT(buf[4] == 0x11); 1084 CU_ASSERT(buf[11] == 0xFF); 1085 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 1086 CU_ASSERT(buf[13] == 16); 1087 CU_ASSERT(buf[16] == 0x22); 1088 CU_ASSERT(buf[31] == 0xEE); 1089 CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID); 1090 CU_ASSERT(buf[33] == 16); 1091 CU_ASSERT(buf[36] == 0x33); 1092 CU_ASSERT(buf[51] == 0xDD); 1093 CU_ASSERT(buf[53] == 0); 1094 1095 spdk_bit_array_free(&ctrlr.visible_ns); 1096 } 1097 1098 static void 1099 test_identify_ns(void) 1100 { 1101 struct spdk_nvmf_subsystem subsystem = {}; 1102 struct spdk_nvmf_transport transport = {}; 1103 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 1104 struct spdk_nvmf_ctrlr ctrlr = { 1105 .subsys = &subsystem, 1106 .admin_qpair = &admin_qpair, 1107 }; 1108 struct spdk_nvme_cmd cmd = {}; 1109 struct spdk_nvme_cpl rsp = {}; 1110 struct spdk_nvme_ns_data nsdata = {}; 1111 struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}}; 1112 struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}}; 1113 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 1114 1115 ctrlr.visible_ns = spdk_bit_array_create(3); 1116 spdk_bit_array_set(ctrlr.visible_ns, 0); 1117 spdk_bit_array_set(ctrlr.visible_ns, 2); 1118 1119 subsystem.ns = ns_arr; 1120 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 1121 1122 /* Invalid NSID 0 */ 1123 cmd.nsid = 0; 1124 memset(&nsdata, 0, sizeof(nsdata)); 1125 memset(&rsp, 0, sizeof(rsp)); 1126 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1127 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1128 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1129 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1130 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1131 1132 /* Valid NSID 1 */ 1133 cmd.nsid = 1; 1134 memset(&nsdata, 0, sizeof(nsdata)); 1135 memset(&rsp, 0, sizeof(rsp)); 1136 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1137 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1138 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1139 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1140 CU_ASSERT(nsdata.nsze == 1234); 1141 1142 /* Valid but inactive NSID 1 */ 1143 spdk_bit_array_clear(ctrlr.visible_ns, 0); 1144 cmd.nsid = 1; 1145 memset(&nsdata, 0, sizeof(nsdata)); 1146 memset(&rsp, 0, sizeof(rsp)); 1147 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1148 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1149 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1150 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1151 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1152 1153 /* Valid but unallocated NSID 2 */ 1154 cmd.nsid = 2; 1155 memset(&nsdata, 0, sizeof(nsdata)); 1156 memset(&rsp, 0, sizeof(rsp)); 1157 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1158 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1159 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1160 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1161 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1162 1163 /* Valid NSID 3 */ 1164 cmd.nsid = 3; 1165 memset(&nsdata, 0, sizeof(nsdata)); 1166 memset(&rsp, 0, sizeof(rsp)); 1167 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1168 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1169 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1170 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1171 CU_ASSERT(nsdata.nsze == 5678); 1172 1173 /* Invalid NSID 4 */ 1174 cmd.nsid = 4; 1175 memset(&nsdata, 0, sizeof(nsdata)); 1176 memset(&rsp, 0, sizeof(rsp)); 1177 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1178 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1179 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1180 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1181 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1182 1183 /* Invalid NSID 0xFFFFFFFF (NS management not supported) */ 1184 cmd.nsid = 0xFFFFFFFF; 1185 memset(&nsdata, 0, sizeof(nsdata)); 1186 memset(&rsp, 0, sizeof(rsp)); 1187 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1188 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1189 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1190 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1191 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1192 1193 spdk_bit_array_free(&ctrlr.visible_ns); 1194 } 1195 1196 static void 1197 test_identify_ns_iocs_specific(void) 1198 { 1199 struct spdk_nvmf_subsystem subsystem = {}; 1200 struct spdk_nvmf_transport transport = {}; 1201 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport }; 1202 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 1203 struct spdk_nvme_cmd cmd = {}; 1204 struct spdk_nvme_cpl rsp = {}; 1205 struct spdk_nvme_zns_ns_data nsdata_zns = {}; 1206 struct spdk_nvme_nvm_ns_data nsdata_nvm = {}; 1207 struct spdk_bdev bdev[2] = {{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE, .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES}, {.blockcnt = 5678}}; 1208 struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}}; 1209 struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]}; 1210 1211 ctrlr.visible_ns = spdk_bit_array_create(3); 1212 spdk_bit_array_set(ctrlr.visible_ns, 0); 1213 spdk_bit_array_set(ctrlr.visible_ns, 1); 1214 spdk_bit_array_set(ctrlr.visible_ns, 2); 1215 subsystem.ns = ns_arr; 1216 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 1217 1218 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS; 1219 1220 /* Invalid ZNS NSID 0 */ 1221 cmd.nsid = 0; 1222 memset(&nsdata_zns, 0xFF, sizeof(nsdata_zns)); 1223 memset(&rsp, 0, sizeof(rsp)); 1224 CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1225 &nsdata_zns, sizeof(nsdata_zns)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1226 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1227 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1228 CU_ASSERT(spdk_mem_all_zero(&nsdata_zns, sizeof(nsdata_zns))); 1229 1230 /* Valid ZNS NSID 1 */ 1231 cmd.nsid = 1; 1232 memset(&nsdata_zns, 0xFF, sizeof(nsdata_zns)); 1233 memset(&rsp, 0, sizeof(rsp)); 1234 CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1235 &nsdata_zns, sizeof(nsdata_zns)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1236 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1237 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1238 CU_ASSERT(nsdata_zns.ozcs.read_across_zone_boundaries == 1); 1239 CU_ASSERT(nsdata_zns.mar == MAX_ACTIVE_ZONES - 1); 1240 CU_ASSERT(nsdata_zns.mor == MAX_OPEN_ZONES - 1); 1241 CU_ASSERT(nsdata_zns.lbafe[0].zsze == ZONE_SIZE); 1242 nsdata_zns.ozcs.read_across_zone_boundaries = 0; 1243 nsdata_zns.mar = 0; 1244 nsdata_zns.mor = 0; 1245 nsdata_zns.lbafe[0].zsze = 0; 1246 CU_ASSERT(spdk_mem_all_zero(&nsdata_zns, sizeof(nsdata_zns))); 1247 1248 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM; 1249 1250 /* Valid NVM NSID 2 with DIF type 1 */ 1251 bdev[1].dif_type = SPDK_DIF_TYPE1; 1252 cmd.nsid = 2; 1253 memset(&nsdata_nvm, 0xFF, sizeof(nsdata_nvm)); 1254 memset(&rsp, 0, sizeof(rsp)); 1255 CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1256 &nsdata_nvm, sizeof(nsdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1257 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1258 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1259 CU_ASSERT(nsdata_nvm.lbstm == 0); 1260 CU_ASSERT(nsdata_nvm.pic._16bpists == 0); 1261 CU_ASSERT(nsdata_nvm.pic._16bpistm == 1); 1262 CU_ASSERT(nsdata_nvm.pic.stcrs == 0); 1263 CU_ASSERT(nsdata_nvm.elbaf[0].sts == 16); 1264 CU_ASSERT(nsdata_nvm.elbaf[0].pif == SPDK_DIF_PI_FORMAT_32); 1265 nsdata_nvm.pic._16bpistm = 0; 1266 nsdata_nvm.elbaf[0].sts = 0; 1267 nsdata_nvm.elbaf[0].pif = 0; 1268 CU_ASSERT(spdk_mem_all_zero(&nsdata_nvm, sizeof(nsdata_nvm))); 1269 1270 /* Invalid NVM NSID 3 */ 1271 cmd.nsid = 0; 1272 memset(&nsdata_nvm, 0xFF, sizeof(nsdata_nvm)); 1273 memset(&rsp, 0, sizeof(rsp)); 1274 CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1275 &nsdata_nvm, sizeof(nsdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1276 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1277 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1278 CU_ASSERT(spdk_mem_all_zero(&nsdata_nvm, sizeof(nsdata_nvm))); 1279 1280 spdk_bit_array_free(&ctrlr.visible_ns); 1281 } 1282 1283 static void 1284 test_set_get_features(void) 1285 { 1286 struct spdk_nvmf_subsystem subsystem = {}; 1287 struct spdk_nvmf_qpair admin_qpair = {}; 1288 enum spdk_nvme_ana_state ana_state[3]; 1289 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 1290 struct spdk_nvmf_ctrlr ctrlr = { 1291 .subsys = &subsystem, 1292 .admin_qpair = &admin_qpair, 1293 .listener = &listener 1294 }; 1295 union nvmf_h2c_msg cmd = {}; 1296 union nvmf_c2h_msg rsp = {}; 1297 struct spdk_nvmf_ns ns[3]; 1298 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 1299 struct spdk_nvmf_request req; 1300 int rc; 1301 1302 ctrlr.visible_ns = spdk_bit_array_create(3); 1303 spdk_bit_array_set(ctrlr.visible_ns, 0); 1304 spdk_bit_array_set(ctrlr.visible_ns, 2); 1305 ns[0].anagrpid = 1; 1306 ns[2].anagrpid = 3; 1307 subsystem.ns = ns_arr; 1308 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 1309 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1310 listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1311 admin_qpair.ctrlr = &ctrlr; 1312 req.qpair = &admin_qpair; 1313 cmd.nvme_cmd.nsid = 1; 1314 req.cmd = &cmd; 1315 req.rsp = &rsp; 1316 1317 /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 1318 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1319 cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1; 1320 ns[0].ptpl_file = "testcfg"; 1321 rc = nvmf_ctrlr_set_features_reservation_persistence(&req); 1322 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1323 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 1324 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE); 1325 CU_ASSERT(ns[0].ptpl_activated == true); 1326 1327 /* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 1328 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1329 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST; 1330 rc = nvmf_ctrlr_get_features_reservation_persistence(&req); 1331 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1332 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1333 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1334 CU_ASSERT(rsp.nvme_cpl.cdw0 == 1); 1335 1336 1337 /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */ 1338 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1339 cmd.nvme_cmd.cdw11 = 0x42; 1340 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1341 1342 rc = nvmf_ctrlr_get_features(&req); 1343 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1344 1345 /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */ 1346 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1347 cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */ 1348 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1349 1350 rc = nvmf_ctrlr_get_features(&req); 1351 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1352 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1353 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1354 1355 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */ 1356 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1357 cmd.nvme_cmd.cdw11 = 0x42; 1358 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1359 1360 rc = nvmf_ctrlr_set_features(&req); 1361 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1362 1363 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */ 1364 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1365 cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */ 1366 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1367 1368 rc = nvmf_ctrlr_set_features(&req); 1369 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1370 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1371 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1372 1373 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */ 1374 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1375 cmd.nvme_cmd.cdw11 = 0x42; 1376 cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */ 1377 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1378 1379 rc = nvmf_ctrlr_set_features(&req); 1380 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1381 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1382 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1383 1384 1385 /* get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */ 1386 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1387 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1388 1389 rc = nvmf_ctrlr_get_features(&req); 1390 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1391 1392 /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */ 1393 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1394 cmd.nvme_cmd.cdw11 = 0x42; 1395 cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1; 1396 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1397 1398 rc = nvmf_ctrlr_set_features(&req); 1399 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1400 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1401 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1402 1403 /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */ 1404 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1405 cmd.nvme_cmd.cdw11 = 0x42; 1406 cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0; 1407 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1408 1409 rc = nvmf_ctrlr_set_features(&req); 1410 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1411 1412 spdk_bit_array_free(&ctrlr.visible_ns); 1413 } 1414 1415 /* 1416 * Reservation Unit Test Configuration 1417 * -------- -------- -------- 1418 * | Host A | | Host B | | Host C | 1419 * -------- -------- -------- 1420 * / \ | | 1421 * -------- -------- ------- ------- 1422 * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C| 1423 * -------- -------- ------- ------- 1424 * \ \ / / 1425 * \ \ / / 1426 * \ \ / / 1427 * -------------------------------------- 1428 * | NAMESPACE 1 | 1429 * -------------------------------------- 1430 */ 1431 1432 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C; 1433 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info; 1434 1435 static void 1436 ut_reservation_init(enum spdk_nvme_reservation_type rtype) 1437 { 1438 /* Host A has two controllers */ 1439 spdk_uuid_generate(&g_ctrlr1_A.hostid); 1440 spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid); 1441 1442 /* Host B has 1 controller */ 1443 spdk_uuid_generate(&g_ctrlr_B.hostid); 1444 1445 /* Host C has 1 controller */ 1446 spdk_uuid_generate(&g_ctrlr_C.hostid); 1447 1448 memset(&g_ns_info, 0, sizeof(g_ns_info)); 1449 g_ns_info.rtype = rtype; 1450 g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid; 1451 g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid; 1452 g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid; 1453 } 1454 1455 static void 1456 test_reservation_write_exclusive(void) 1457 { 1458 struct spdk_nvmf_request req = {}; 1459 union nvmf_h2c_msg cmd = {}; 1460 union nvmf_c2h_msg rsp = {}; 1461 int rc; 1462 1463 req.cmd = &cmd; 1464 req.rsp = &rsp; 1465 1466 /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */ 1467 ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE); 1468 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1469 1470 /* Test Case: Issue a Read command from Host A and Host B */ 1471 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1472 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1473 SPDK_CU_ASSERT_FATAL(rc == 0); 1474 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1475 SPDK_CU_ASSERT_FATAL(rc == 0); 1476 1477 /* Test Case: Issue a DSM Write command from Host A and Host B */ 1478 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1479 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1480 SPDK_CU_ASSERT_FATAL(rc == 0); 1481 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1482 SPDK_CU_ASSERT_FATAL(rc < 0); 1483 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1484 1485 /* Test Case: Issue a Write command from Host C */ 1486 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1487 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1488 SPDK_CU_ASSERT_FATAL(rc < 0); 1489 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1490 1491 /* Test Case: Issue a Read command from Host B */ 1492 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1493 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1494 SPDK_CU_ASSERT_FATAL(rc == 0); 1495 1496 /* Unregister Host C */ 1497 spdk_uuid_set_null(&g_ns_info.reg_hostid[2]); 1498 1499 /* Test Case: Read and Write commands from non-registrant Host C */ 1500 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1501 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1502 SPDK_CU_ASSERT_FATAL(rc < 0); 1503 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1504 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1505 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1506 SPDK_CU_ASSERT_FATAL(rc == 0); 1507 } 1508 1509 static void 1510 test_reservation_exclusive_access(void) 1511 { 1512 struct spdk_nvmf_request req = {}; 1513 union nvmf_h2c_msg cmd = {}; 1514 union nvmf_c2h_msg rsp = {}; 1515 int rc; 1516 1517 req.cmd = &cmd; 1518 req.rsp = &rsp; 1519 1520 /* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */ 1521 ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS); 1522 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1523 1524 /* Test Case: Issue a Read command from Host B */ 1525 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1526 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1527 SPDK_CU_ASSERT_FATAL(rc < 0); 1528 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1529 1530 /* Test Case: Issue a Reservation Release command from a valid Registrant */ 1531 cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE; 1532 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1533 SPDK_CU_ASSERT_FATAL(rc == 0); 1534 } 1535 1536 static void 1537 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1538 { 1539 struct spdk_nvmf_request req = {}; 1540 union nvmf_h2c_msg cmd = {}; 1541 union nvmf_c2h_msg rsp = {}; 1542 int rc; 1543 1544 req.cmd = &cmd; 1545 req.rsp = &rsp; 1546 1547 /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */ 1548 ut_reservation_init(rtype); 1549 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1550 1551 /* Test Case: Issue a Read command from Host A and Host C */ 1552 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1553 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1554 SPDK_CU_ASSERT_FATAL(rc == 0); 1555 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1556 SPDK_CU_ASSERT_FATAL(rc == 0); 1557 1558 /* Test Case: Issue a DSM Write command from Host A and Host C */ 1559 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1560 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1561 SPDK_CU_ASSERT_FATAL(rc == 0); 1562 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1563 SPDK_CU_ASSERT_FATAL(rc == 0); 1564 1565 /* Unregister Host C */ 1566 spdk_uuid_set_null(&g_ns_info.reg_hostid[2]); 1567 1568 /* Test Case: Read and Write commands from non-registrant Host C */ 1569 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1570 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1571 SPDK_CU_ASSERT_FATAL(rc == 0); 1572 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1573 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1574 SPDK_CU_ASSERT_FATAL(rc < 0); 1575 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1576 } 1577 1578 static void 1579 test_reservation_write_exclusive_regs_only_and_all_regs(void) 1580 { 1581 _test_reservation_write_exclusive_regs_only_and_all_regs( 1582 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1583 _test_reservation_write_exclusive_regs_only_and_all_regs( 1584 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS); 1585 } 1586 1587 static void 1588 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1589 { 1590 struct spdk_nvmf_request req = {}; 1591 union nvmf_h2c_msg cmd = {}; 1592 union nvmf_c2h_msg rsp = {}; 1593 int rc; 1594 1595 req.cmd = &cmd; 1596 req.rsp = &rsp; 1597 1598 /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */ 1599 ut_reservation_init(rtype); 1600 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1601 1602 /* Test Case: Issue a Write command from Host B */ 1603 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1604 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1605 SPDK_CU_ASSERT_FATAL(rc == 0); 1606 1607 /* Unregister Host B */ 1608 spdk_uuid_set_null(&g_ns_info.reg_hostid[1]); 1609 1610 /* Test Case: Issue a Read command from Host B */ 1611 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1612 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1613 SPDK_CU_ASSERT_FATAL(rc < 0); 1614 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1615 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1616 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1617 SPDK_CU_ASSERT_FATAL(rc < 0); 1618 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1619 } 1620 1621 static void 1622 test_reservation_exclusive_access_regs_only_and_all_regs(void) 1623 { 1624 _test_reservation_exclusive_access_regs_only_and_all_regs( 1625 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY); 1626 _test_reservation_exclusive_access_regs_only_and_all_regs( 1627 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 1628 } 1629 1630 static void 1631 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1632 { 1633 STAILQ_INIT(&ctrlr->async_events); 1634 } 1635 1636 static void 1637 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1638 { 1639 struct spdk_nvmf_async_event_completion *event, *event_tmp; 1640 1641 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) { 1642 STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link); 1643 free(event); 1644 } 1645 } 1646 1647 static int 1648 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1649 { 1650 int num = 0; 1651 struct spdk_nvmf_async_event_completion *event; 1652 1653 STAILQ_FOREACH(event, &ctrlr->async_events, link) { 1654 num++; 1655 } 1656 return num; 1657 } 1658 1659 static void 1660 test_reservation_notification_log_page(void) 1661 { 1662 struct spdk_nvmf_ctrlr ctrlr; 1663 struct spdk_nvmf_qpair qpair; 1664 struct spdk_nvmf_ns ns; 1665 struct spdk_nvmf_request req = {}; 1666 union nvmf_h2c_msg cmd = {}; 1667 union nvmf_c2h_msg rsp = {}; 1668 union spdk_nvme_async_event_completion event = {}; 1669 struct spdk_nvme_reservation_notification_log logs[3]; 1670 struct iovec iov; 1671 1672 memset(&ctrlr, 0, sizeof(ctrlr)); 1673 ctrlr.thread = spdk_get_thread(); 1674 TAILQ_INIT(&ctrlr.log_head); 1675 init_pending_async_events(&ctrlr); 1676 ns.nsid = 1; 1677 1678 /* Test Case: Mask all the reservation notifications */ 1679 ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK | 1680 SPDK_NVME_RESERVATION_RELEASED_MASK | 1681 SPDK_NVME_RESERVATION_PREEMPTED_MASK; 1682 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1683 SPDK_NVME_REGISTRATION_PREEMPTED); 1684 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1685 SPDK_NVME_RESERVATION_RELEASED); 1686 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1687 SPDK_NVME_RESERVATION_PREEMPTED); 1688 poll_threads(); 1689 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head)); 1690 1691 /* Test Case: Unmask all the reservation notifications, 1692 * 3 log pages are generated, and AER was triggered. 1693 */ 1694 ns.mask = 0; 1695 ctrlr.num_avail_log_pages = 0; 1696 req.cmd = &cmd; 1697 req.rsp = &rsp; 1698 ctrlr.aer_req[0] = &req; 1699 ctrlr.nr_aer_reqs = 1; 1700 req.qpair = &qpair; 1701 TAILQ_INIT(&qpair.outstanding); 1702 qpair.ctrlr = NULL; 1703 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 1704 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 1705 1706 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1707 SPDK_NVME_REGISTRATION_PREEMPTED); 1708 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1709 SPDK_NVME_RESERVATION_RELEASED); 1710 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1711 SPDK_NVME_RESERVATION_PREEMPTED); 1712 poll_threads(); 1713 event.raw = rsp.nvme_cpl.cdw0; 1714 SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO); 1715 SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL); 1716 SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION); 1717 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3); 1718 1719 /* Test Case: Get Log Page to clear the log pages */ 1720 iov.iov_base = &logs[0]; 1721 iov.iov_len = sizeof(logs); 1722 nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0); 1723 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0); 1724 1725 cleanup_pending_async_events(&ctrlr); 1726 } 1727 1728 static void 1729 test_get_dif_ctx(void) 1730 { 1731 struct spdk_nvmf_subsystem subsystem = {}; 1732 struct spdk_nvmf_request req = {}; 1733 struct spdk_nvmf_qpair qpair = {}; 1734 struct spdk_nvmf_ctrlr ctrlr = {}; 1735 struct spdk_nvmf_ns ns = {}; 1736 struct spdk_nvmf_ns *_ns = NULL; 1737 struct spdk_bdev bdev = {}; 1738 union nvmf_h2c_msg cmd = {}; 1739 struct spdk_dif_ctx dif_ctx = {}; 1740 bool ret; 1741 1742 ctrlr.subsys = &subsystem; 1743 ctrlr.visible_ns = spdk_bit_array_create(1); 1744 spdk_bit_array_set(ctrlr.visible_ns, 0); 1745 1746 qpair.ctrlr = &ctrlr; 1747 1748 req.qpair = &qpair; 1749 req.cmd = &cmd; 1750 1751 ns.bdev = &bdev; 1752 1753 ctrlr.dif_insert_or_strip = false; 1754 1755 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1756 CU_ASSERT(ret == false); 1757 1758 ctrlr.dif_insert_or_strip = true; 1759 qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED; 1760 1761 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1762 CU_ASSERT(ret == false); 1763 1764 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 1765 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 1766 1767 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1768 CU_ASSERT(ret == false); 1769 1770 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH; 1771 1772 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1773 CU_ASSERT(ret == false); 1774 1775 qpair.qid = 1; 1776 1777 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1778 CU_ASSERT(ret == false); 1779 1780 cmd.nvme_cmd.nsid = 1; 1781 1782 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1783 CU_ASSERT(ret == false); 1784 1785 subsystem.max_nsid = 1; 1786 subsystem.ns = &_ns; 1787 subsystem.ns[0] = &ns; 1788 1789 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1790 CU_ASSERT(ret == false); 1791 1792 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE; 1793 1794 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1795 CU_ASSERT(ret == true); 1796 1797 spdk_bit_array_free(&ctrlr.visible_ns); 1798 } 1799 1800 static void 1801 test_identify_ctrlr(void) 1802 { 1803 struct spdk_nvmf_tgt tgt = {}; 1804 struct spdk_nvmf_subsystem subsystem = { 1805 .subtype = SPDK_NVMF_SUBTYPE_NVME, 1806 .tgt = &tgt, 1807 }; 1808 struct spdk_nvmf_transport_ops tops = {}; 1809 struct spdk_nvmf_transport transport = { 1810 .ops = &tops, 1811 .opts = { 1812 .in_capsule_data_size = 4096, 1813 }, 1814 }; 1815 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 1816 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 1817 struct spdk_nvme_ctrlr_data cdata = {}; 1818 uint32_t expected_ioccsz; 1819 1820 nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata); 1821 1822 /* Check ioccsz, TCP transport */ 1823 tops.type = SPDK_NVME_TRANSPORT_TCP; 1824 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1825 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1826 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1827 1828 /* Check ioccsz, RDMA transport */ 1829 tops.type = SPDK_NVME_TRANSPORT_RDMA; 1830 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1831 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1832 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1833 1834 /* Check ioccsz, TCP transport with dif_insert_or_strip */ 1835 tops.type = SPDK_NVME_TRANSPORT_TCP; 1836 ctrlr.dif_insert_or_strip = true; 1837 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1838 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1839 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1840 } 1841 1842 static void 1843 test_identify_ctrlr_iocs_specific(void) 1844 { 1845 struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 }; 1846 struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 }; 1847 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop }; 1848 struct spdk_nvme_cmd cmd = {}; 1849 struct spdk_nvme_cpl rsp = {}; 1850 struct spdk_nvme_zns_ctrlr_data ctrlr_data = {}; 1851 struct spdk_nvme_nvm_ctrlr_data cdata_nvm = {}; 1852 1853 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS; 1854 1855 /* ZNS max_zone_append_size_kib no limit */ 1856 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1857 memset(&rsp, 0, sizeof(rsp)); 1858 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1859 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1860 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1861 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1862 CU_ASSERT(ctrlr_data.zasl == 0); 1863 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1864 1865 /* ZNS max_zone_append_size_kib = 4096 */ 1866 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1867 memset(&rsp, 0, sizeof(rsp)); 1868 subsystem.max_zone_append_size_kib = 4096; 1869 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1870 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1871 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1872 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1873 CU_ASSERT(ctrlr_data.zasl == 0); 1874 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1875 1876 /* ZNS max_zone_append_size_kib = 60000 */ 1877 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1878 memset(&rsp, 0, sizeof(rsp)); 1879 subsystem.max_zone_append_size_kib = 60000; 1880 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1881 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1882 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1883 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1884 CU_ASSERT(ctrlr_data.zasl == 3); 1885 ctrlr_data.zasl = 0; 1886 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1887 1888 /* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */ 1889 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1890 memset(&rsp, 0, sizeof(rsp)); 1891 ctrlr.vcprop.cap.bits.mpsmin = 2; 1892 subsystem.max_zone_append_size_kib = 60000; 1893 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1894 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1895 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1896 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1897 CU_ASSERT(ctrlr_data.zasl == 1); 1898 ctrlr_data.zasl = 0; 1899 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1900 ctrlr.vcprop.cap.bits.mpsmin = 0; 1901 1902 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM; 1903 1904 /* NVM max_discard_size_kib = 1024; 1905 * max_write_zeroes_size_kib = 1024; 1906 * mpsmin = 0; 1907 */ 1908 memset(&cdata_nvm, 0xFF, sizeof(cdata_nvm)); 1909 memset(&rsp, 0, sizeof(rsp)); 1910 subsystem.max_discard_size_kib = (uint64_t)1024; 1911 subsystem.max_write_zeroes_size_kib = (uint64_t)1024; 1912 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1913 &cdata_nvm, sizeof(cdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1914 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1915 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1916 CU_ASSERT(cdata_nvm.wzsl == 8); 1917 CU_ASSERT(cdata_nvm.dmrsl == 2048); 1918 CU_ASSERT(cdata_nvm.dmrl == 1); 1919 } 1920 1921 static int 1922 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req) 1923 { 1924 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 1925 1926 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1927 }; 1928 1929 static void 1930 test_custom_admin_cmd(void) 1931 { 1932 struct spdk_nvmf_subsystem subsystem; 1933 struct spdk_nvmf_qpair qpair; 1934 struct spdk_nvmf_ctrlr ctrlr; 1935 struct spdk_nvmf_request req; 1936 struct spdk_nvmf_ns *ns_ptrs[1]; 1937 struct spdk_nvmf_ns ns; 1938 union nvmf_h2c_msg cmd; 1939 union nvmf_c2h_msg rsp; 1940 struct spdk_bdev bdev; 1941 uint8_t buf[4096]; 1942 int rc; 1943 1944 memset(&subsystem, 0, sizeof(subsystem)); 1945 ns_ptrs[0] = &ns; 1946 subsystem.ns = ns_ptrs; 1947 subsystem.max_nsid = 1; 1948 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1949 1950 memset(&ns, 0, sizeof(ns)); 1951 ns.opts.nsid = 1; 1952 ns.bdev = &bdev; 1953 1954 memset(&qpair, 0, sizeof(qpair)); 1955 qpair.ctrlr = &ctrlr; 1956 1957 memset(&ctrlr, 0, sizeof(ctrlr)); 1958 ctrlr.subsys = &subsystem; 1959 ctrlr.vcprop.cc.bits.en = 1; 1960 ctrlr.thread = spdk_get_thread(); 1961 1962 memset(&req, 0, sizeof(req)); 1963 req.qpair = &qpair; 1964 req.cmd = &cmd; 1965 req.rsp = &rsp; 1966 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 1967 req.length = sizeof(buf); 1968 SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length); 1969 1970 memset(&cmd, 0, sizeof(cmd)); 1971 cmd.nvme_cmd.opc = 0xc1; 1972 cmd.nvme_cmd.nsid = 0; 1973 memset(&rsp, 0, sizeof(rsp)); 1974 1975 spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr); 1976 1977 /* Ensure that our hdlr is being called */ 1978 rc = nvmf_ctrlr_process_admin_cmd(&req); 1979 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1980 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1981 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1982 } 1983 1984 static void 1985 test_fused_compare_and_write(void) 1986 { 1987 struct spdk_nvmf_request req = {}; 1988 struct spdk_nvmf_qpair qpair = {}; 1989 struct spdk_nvme_cmd cmd = {}; 1990 union nvmf_c2h_msg rsp = {}; 1991 struct spdk_nvmf_ctrlr ctrlr = {}; 1992 struct spdk_nvmf_subsystem subsystem = {}; 1993 struct spdk_nvmf_ns ns = {}; 1994 struct spdk_nvmf_ns *subsys_ns[1] = {}; 1995 enum spdk_nvme_ana_state ana_state[1]; 1996 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 1997 struct spdk_bdev bdev = {}; 1998 1999 struct spdk_nvmf_poll_group group = {}; 2000 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2001 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2002 struct spdk_io_channel io_ch = {}; 2003 2004 ns.bdev = &bdev; 2005 ns.anagrpid = 1; 2006 2007 subsystem.id = 0; 2008 subsystem.max_nsid = 1; 2009 subsys_ns[0] = &ns; 2010 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2011 2012 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2013 2014 /* Enable controller */ 2015 ctrlr.vcprop.cc.bits.en = 1; 2016 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2017 ctrlr.listener = &listener; 2018 ctrlr.visible_ns = spdk_bit_array_create(1); 2019 spdk_bit_array_set(ctrlr.visible_ns, 0); 2020 2021 group.num_sgroups = 1; 2022 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2023 sgroups.num_ns = 1; 2024 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2025 ns_info.channel = &io_ch; 2026 sgroups.ns_info = &ns_info; 2027 TAILQ_INIT(&sgroups.queued); 2028 group.sgroups = &sgroups; 2029 TAILQ_INIT(&qpair.outstanding); 2030 2031 qpair.ctrlr = &ctrlr; 2032 qpair.group = &group; 2033 qpair.qid = 1; 2034 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2035 2036 cmd.nsid = 1; 2037 2038 req.qpair = &qpair; 2039 req.cmd = (union nvmf_h2c_msg *)&cmd; 2040 req.rsp = &rsp; 2041 2042 /* SUCCESS/SUCCESS */ 2043 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2044 cmd.opc = SPDK_NVME_OPC_COMPARE; 2045 2046 spdk_nvmf_request_exec(&req); 2047 CU_ASSERT(qpair.first_fused_req != NULL); 2048 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2049 2050 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2051 cmd.opc = SPDK_NVME_OPC_WRITE; 2052 2053 spdk_nvmf_request_exec(&req); 2054 CU_ASSERT(qpair.first_fused_req == NULL); 2055 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2056 2057 /* Wrong sequence */ 2058 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2059 cmd.opc = SPDK_NVME_OPC_WRITE; 2060 2061 spdk_nvmf_request_exec(&req); 2062 CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status)); 2063 CU_ASSERT(qpair.first_fused_req == NULL); 2064 2065 /* Write as FUSE_FIRST (Wrong op code) */ 2066 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2067 cmd.opc = SPDK_NVME_OPC_WRITE; 2068 2069 spdk_nvmf_request_exec(&req); 2070 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 2071 CU_ASSERT(qpair.first_fused_req == NULL); 2072 2073 /* Compare as FUSE_SECOND (Wrong op code) */ 2074 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2075 cmd.opc = SPDK_NVME_OPC_COMPARE; 2076 2077 spdk_nvmf_request_exec(&req); 2078 CU_ASSERT(qpair.first_fused_req != NULL); 2079 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2080 2081 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2082 cmd.opc = SPDK_NVME_OPC_COMPARE; 2083 2084 spdk_nvmf_request_exec(&req); 2085 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 2086 CU_ASSERT(qpair.first_fused_req == NULL); 2087 2088 spdk_bit_array_free(&ctrlr.visible_ns); 2089 } 2090 2091 static void 2092 test_multi_async_event_reqs(void) 2093 { 2094 struct spdk_nvmf_subsystem subsystem = {}; 2095 struct spdk_nvmf_qpair qpair = {}; 2096 struct spdk_nvmf_ctrlr ctrlr = {}; 2097 struct spdk_nvmf_request req[5] = {}; 2098 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2099 struct spdk_nvmf_ns ns = {}; 2100 union nvmf_h2c_msg cmd[5] = {}; 2101 union nvmf_c2h_msg rsp[5] = {}; 2102 2103 struct spdk_nvmf_poll_group group = {}; 2104 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2105 2106 int i; 2107 2108 ns_ptrs[0] = &ns; 2109 subsystem.ns = ns_ptrs; 2110 subsystem.max_nsid = 1; 2111 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2112 2113 ns.opts.nsid = 1; 2114 group.sgroups = &sgroups; 2115 2116 qpair.ctrlr = &ctrlr; 2117 qpair.group = &group; 2118 TAILQ_INIT(&qpair.outstanding); 2119 2120 ctrlr.subsys = &subsystem; 2121 ctrlr.vcprop.cc.bits.en = 1; 2122 ctrlr.thread = spdk_get_thread(); 2123 2124 for (i = 0; i < 5; i++) { 2125 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2126 cmd[i].nvme_cmd.nsid = 1; 2127 cmd[i].nvme_cmd.cid = i; 2128 2129 req[i].qpair = &qpair; 2130 req[i].cmd = &cmd[i]; 2131 req[i].rsp = &rsp[i]; 2132 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 2133 } 2134 2135 /* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */ 2136 sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS; 2137 for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) { 2138 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 2139 CU_ASSERT(ctrlr.nr_aer_reqs == i + 1); 2140 } 2141 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2142 2143 /* Exceeding the SPDK_NVMF_MAX_ASYNC_EVENTS reports error */ 2144 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2145 CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS); 2146 CU_ASSERT(rsp[4].nvme_cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC); 2147 CU_ASSERT(rsp[4].nvme_cpl.status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED); 2148 2149 /* Test if the aer_reqs keep continuous when abort a req in the middle */ 2150 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true); 2151 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 2152 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 2153 CU_ASSERT(ctrlr.aer_req[2] == &req[3]); 2154 2155 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true); 2156 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 2157 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 2158 CU_ASSERT(ctrlr.aer_req[2] == NULL); 2159 CU_ASSERT(ctrlr.nr_aer_reqs == 2); 2160 2161 TAILQ_REMOVE(&qpair.outstanding, &req[0], link); 2162 TAILQ_REMOVE(&qpair.outstanding, &req[1], link); 2163 } 2164 2165 static void 2166 test_get_ana_log_page_one_ns_per_anagrp(void) 2167 { 2168 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t)) 2169 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE) 2170 uint32_t ana_group[3]; 2171 struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group }; 2172 struct spdk_nvmf_ctrlr ctrlr = {}; 2173 enum spdk_nvme_ana_state ana_state[3]; 2174 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2175 struct spdk_nvmf_ns ns[3]; 2176 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]}; 2177 uint64_t offset; 2178 uint32_t length; 2179 int i; 2180 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2181 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2182 struct iovec iov, iovs[2]; 2183 struct spdk_nvme_ana_page *ana_hdr; 2184 char _ana_desc[UT_ANA_DESC_SIZE]; 2185 struct spdk_nvme_ana_group_descriptor *ana_desc; 2186 2187 subsystem.ns = ns_arr; 2188 subsystem.max_nsid = 3; 2189 for (i = 0; i < 3; i++) { 2190 subsystem.ana_group[i] = 1; 2191 } 2192 ctrlr.subsys = &subsystem; 2193 ctrlr.listener = &listener; 2194 2195 for (i = 0; i < 3; i++) { 2196 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2197 } 2198 2199 for (i = 0; i < 3; i++) { 2200 ns_arr[i]->nsid = i + 1; 2201 ns_arr[i]->anagrpid = i + 1; 2202 } 2203 2204 /* create expected page */ 2205 ana_hdr = (void *)&expected_page[0]; 2206 ana_hdr->num_ana_group_desc = 3; 2207 ana_hdr->change_count = 0; 2208 2209 /* descriptor may be unaligned. So create data and then copy it to the location. */ 2210 ana_desc = (void *)_ana_desc; 2211 offset = sizeof(struct spdk_nvme_ana_page); 2212 2213 for (i = 0; i < 3; i++) { 2214 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 2215 ana_desc->ana_group_id = ns_arr[i]->nsid; 2216 ana_desc->num_of_nsid = 1; 2217 ana_desc->change_count = 0; 2218 ana_desc->ana_state = ctrlr.listener->ana_state[i]; 2219 ana_desc->nsid[0] = ns_arr[i]->nsid; 2220 memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE); 2221 offset += UT_ANA_DESC_SIZE; 2222 } 2223 2224 /* read entire actual log page */ 2225 offset = 0; 2226 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2227 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2228 iov.iov_base = &actual_page[offset]; 2229 iov.iov_len = length; 2230 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2231 offset += length; 2232 } 2233 2234 /* compare expected page and actual page */ 2235 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2236 2237 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2238 offset = 0; 2239 iovs[0].iov_base = &actual_page[offset]; 2240 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 2241 offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 2242 iovs[1].iov_base = &actual_page[offset]; 2243 iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset; 2244 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2245 2246 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2247 2248 #undef UT_ANA_DESC_SIZE 2249 #undef UT_ANA_LOG_PAGE_SIZE 2250 } 2251 2252 static void 2253 test_get_ana_log_page_multi_ns_per_anagrp(void) 2254 { 2255 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + \ 2256 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 + \ 2257 sizeof(uint32_t) * 5) 2258 struct spdk_nvmf_ns ns[5]; 2259 struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]}; 2260 uint32_t ana_group[5] = {0}; 2261 struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, }; 2262 enum spdk_nvme_ana_state ana_state[5]; 2263 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, }; 2264 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, }; 2265 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2266 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2267 struct iovec iov, iovs[2]; 2268 struct spdk_nvme_ana_page *ana_hdr; 2269 char _ana_desc[UT_ANA_LOG_PAGE_SIZE]; 2270 struct spdk_nvme_ana_group_descriptor *ana_desc; 2271 uint64_t offset; 2272 uint32_t length; 2273 int i; 2274 2275 subsystem.max_nsid = 5; 2276 subsystem.ana_group[1] = 3; 2277 subsystem.ana_group[2] = 2; 2278 for (i = 0; i < 5; i++) { 2279 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2280 } 2281 2282 for (i = 0; i < 5; i++) { 2283 ns_arr[i]->nsid = i + 1; 2284 } 2285 ns_arr[0]->anagrpid = 2; 2286 ns_arr[1]->anagrpid = 3; 2287 ns_arr[2]->anagrpid = 2; 2288 ns_arr[3]->anagrpid = 3; 2289 ns_arr[4]->anagrpid = 2; 2290 2291 /* create expected page */ 2292 ana_hdr = (void *)&expected_page[0]; 2293 ana_hdr->num_ana_group_desc = 2; 2294 ana_hdr->change_count = 0; 2295 2296 /* descriptor may be unaligned. So create data and then copy it to the location. */ 2297 ana_desc = (void *)_ana_desc; 2298 offset = sizeof(struct spdk_nvme_ana_page); 2299 2300 memset(_ana_desc, 0, sizeof(_ana_desc)); 2301 ana_desc->ana_group_id = 2; 2302 ana_desc->num_of_nsid = 3; 2303 ana_desc->change_count = 0; 2304 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2305 ana_desc->nsid[0] = 1; 2306 ana_desc->nsid[1] = 3; 2307 ana_desc->nsid[2] = 5; 2308 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 2309 sizeof(uint32_t) * 3); 2310 offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3; 2311 2312 memset(_ana_desc, 0, sizeof(_ana_desc)); 2313 ana_desc->ana_group_id = 3; 2314 ana_desc->num_of_nsid = 2; 2315 ana_desc->change_count = 0; 2316 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2317 ana_desc->nsid[0] = 2; 2318 ana_desc->nsid[1] = 4; 2319 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 2320 sizeof(uint32_t) * 2); 2321 2322 /* read entire actual log page, and compare expected page and actual page. */ 2323 offset = 0; 2324 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2325 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2326 iov.iov_base = &actual_page[offset]; 2327 iov.iov_len = length; 2328 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2329 offset += length; 2330 } 2331 2332 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2333 2334 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2335 offset = 0; 2336 iovs[0].iov_base = &actual_page[offset]; 2337 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2338 offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2339 iovs[1].iov_base = &actual_page[offset]; 2340 iovs[1].iov_len = sizeof(uint32_t) * 5; 2341 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2342 2343 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2344 2345 #undef UT_ANA_LOG_PAGE_SIZE 2346 } 2347 static void 2348 test_multi_async_events(void) 2349 { 2350 struct spdk_nvmf_subsystem subsystem = {}; 2351 struct spdk_nvmf_qpair qpair = {}; 2352 struct spdk_nvmf_ctrlr ctrlr = {}; 2353 struct spdk_nvmf_request req[4] = {}; 2354 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2355 struct spdk_nvmf_ns ns = {}; 2356 union nvmf_h2c_msg cmd[4] = {}; 2357 union nvmf_c2h_msg rsp[4] = {}; 2358 union spdk_nvme_async_event_completion event = {}; 2359 struct spdk_nvmf_poll_group group = {}; 2360 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2361 int i; 2362 2363 ns_ptrs[0] = &ns; 2364 subsystem.ns = ns_ptrs; 2365 subsystem.max_nsid = 1; 2366 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2367 2368 ns.opts.nsid = 1; 2369 group.sgroups = &sgroups; 2370 2371 qpair.ctrlr = &ctrlr; 2372 qpair.group = &group; 2373 TAILQ_INIT(&qpair.outstanding); 2374 2375 ctrlr.subsys = &subsystem; 2376 ctrlr.vcprop.cc.bits.en = 1; 2377 ctrlr.thread = spdk_get_thread(); 2378 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2379 ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1; 2380 ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1; 2381 init_pending_async_events(&ctrlr); 2382 2383 /* Target queue pending events when there is no outstanding AER request */ 2384 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2385 nvmf_ctrlr_async_event_ana_change_notice(&ctrlr); 2386 nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr); 2387 2388 for (i = 0; i < 4; i++) { 2389 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2390 cmd[i].nvme_cmd.nsid = 1; 2391 cmd[i].nvme_cmd.cid = i; 2392 2393 req[i].qpair = &qpair; 2394 req[i].cmd = &cmd[i]; 2395 req[i].rsp = &rsp[i]; 2396 2397 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 2398 2399 sgroups.mgmt_io_outstanding = 1; 2400 if (i < 3) { 2401 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2402 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2403 CU_ASSERT(ctrlr.nr_aer_reqs == 0); 2404 } else { 2405 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 2406 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2407 CU_ASSERT(ctrlr.nr_aer_reqs == 1); 2408 } 2409 } 2410 2411 event.raw = rsp[0].nvme_cpl.cdw0; 2412 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2413 event.raw = rsp[1].nvme_cpl.cdw0; 2414 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE); 2415 event.raw = rsp[2].nvme_cpl.cdw0; 2416 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE); 2417 2418 cleanup_pending_async_events(&ctrlr); 2419 } 2420 2421 static void 2422 test_rae(void) 2423 { 2424 struct spdk_nvmf_subsystem subsystem = {}; 2425 struct spdk_nvmf_qpair qpair = {}; 2426 struct spdk_nvmf_ctrlr ctrlr = {}; 2427 struct spdk_nvmf_request req[3] = {}; 2428 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2429 struct spdk_nvmf_ns ns = {}; 2430 union nvmf_h2c_msg cmd[3] = {}; 2431 union nvmf_c2h_msg rsp[3] = {}; 2432 union spdk_nvme_async_event_completion event = {}; 2433 struct spdk_nvmf_poll_group group = {}; 2434 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2435 int i; 2436 char data[4096]; 2437 2438 ns_ptrs[0] = &ns; 2439 subsystem.ns = ns_ptrs; 2440 subsystem.max_nsid = 1; 2441 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2442 2443 ns.opts.nsid = 1; 2444 group.sgroups = &sgroups; 2445 2446 qpair.ctrlr = &ctrlr; 2447 qpair.group = &group; 2448 TAILQ_INIT(&qpair.outstanding); 2449 2450 ctrlr.subsys = &subsystem; 2451 ctrlr.vcprop.cc.bits.en = 1; 2452 ctrlr.thread = spdk_get_thread(); 2453 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2454 init_pending_async_events(&ctrlr); 2455 2456 /* Target queue pending events when there is no outstanding AER request */ 2457 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2458 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2459 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2460 /* only one event will be queued before RAE is clear */ 2461 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2462 2463 req[0].qpair = &qpair; 2464 req[0].cmd = &cmd[0]; 2465 req[0].rsp = &rsp[0]; 2466 cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2467 cmd[0].nvme_cmd.nsid = 1; 2468 cmd[0].nvme_cmd.cid = 0; 2469 2470 for (i = 1; i < 3; i++) { 2471 req[i].qpair = &qpair; 2472 req[i].cmd = &cmd[i]; 2473 req[i].rsp = &rsp[i]; 2474 req[i].length = sizeof(data); 2475 SPDK_IOV_ONE(req[i].iov, &req[i].iovcnt, &data, req[i].length); 2476 2477 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 2478 cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid = 2479 SPDK_NVME_LOG_CHANGED_NS_LIST; 2480 cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl = 2481 spdk_nvme_bytes_to_numd(req[i].length); 2482 cmd[i].nvme_cmd.cid = i; 2483 } 2484 cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1; 2485 cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0; 2486 2487 /* consume the pending event */ 2488 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link); 2489 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2490 event.raw = rsp[0].nvme_cpl.cdw0; 2491 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2492 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2493 2494 /* get log with RAE set */ 2495 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2496 CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2497 CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2498 2499 /* will not generate new event until RAE is clear */ 2500 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2501 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2502 2503 /* get log with RAE clear */ 2504 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2505 CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2506 CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2507 2508 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2509 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2510 2511 cleanup_pending_async_events(&ctrlr); 2512 } 2513 2514 static void 2515 test_nvmf_ctrlr_create_destruct(void) 2516 { 2517 struct spdk_nvmf_fabric_connect_data connect_data = {}; 2518 struct spdk_nvmf_poll_group group = {}; 2519 struct spdk_nvmf_subsystem_poll_group sgroups[2] = {}; 2520 struct spdk_nvmf_transport transport = {}; 2521 struct spdk_nvmf_transport_ops tops = {}; 2522 struct spdk_nvmf_subsystem subsystem = {}; 2523 struct spdk_nvmf_ns *ns_arr[1] = { NULL }; 2524 struct spdk_nvmf_request req = {}; 2525 struct spdk_nvmf_qpair qpair = {}; 2526 struct spdk_nvmf_ctrlr *ctrlr = NULL; 2527 struct spdk_nvmf_tgt tgt = {}; 2528 union nvmf_h2c_msg cmd = {}; 2529 union nvmf_c2h_msg rsp = {}; 2530 const uint8_t hostid[16] = { 2531 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2532 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 2533 }; 2534 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 2535 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 2536 2537 group.thread = spdk_get_thread(); 2538 transport.ops = &tops; 2539 transport.opts.max_aq_depth = 32; 2540 transport.opts.max_queue_depth = 64; 2541 transport.opts.max_qpairs_per_ctrlr = 3; 2542 transport.opts.dif_insert_or_strip = true; 2543 transport.tgt = &tgt; 2544 qpair.transport = &transport; 2545 qpair.group = &group; 2546 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 2547 TAILQ_INIT(&qpair.outstanding); 2548 2549 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 2550 connect_data.cntlid = 0xFFFF; 2551 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 2552 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 2553 2554 subsystem.thread = spdk_get_thread(); 2555 subsystem.id = 1; 2556 TAILQ_INIT(&subsystem.ctrlrs); 2557 subsystem.tgt = &tgt; 2558 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2559 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2560 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 2561 subsystem.ns = ns_arr; 2562 2563 group.sgroups = sgroups; 2564 2565 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 2566 cmd.connect_cmd.cid = 1; 2567 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 2568 cmd.connect_cmd.recfmt = 0; 2569 cmd.connect_cmd.qid = 0; 2570 cmd.connect_cmd.sqsize = 31; 2571 cmd.connect_cmd.cattr = 0; 2572 cmd.connect_cmd.kato = 120000; 2573 2574 req.qpair = &qpair; 2575 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 2576 req.length = sizeof(connect_data); 2577 SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length); 2578 req.cmd = &cmd; 2579 req.rsp = &rsp; 2580 2581 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 2582 sgroups[subsystem.id].mgmt_io_outstanding++; 2583 2584 ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base); 2585 poll_threads(); 2586 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2587 CU_ASSERT(req.qpair->ctrlr == ctrlr); 2588 CU_ASSERT(ctrlr->subsys == &subsystem); 2589 CU_ASSERT(ctrlr->thread == req.qpair->group->thread); 2590 CU_ASSERT(ctrlr->disconnect_in_progress == false); 2591 CU_ASSERT(ctrlr->qpair_mask != NULL); 2592 CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000); 2593 CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1); 2594 CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1); 2595 CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1); 2596 CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1); 2597 CU_ASSERT(!strncmp((void *)&ctrlr->hostid, hostid, 16)); 2598 CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1); 2599 CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63); 2600 CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0); 2601 CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500); 2602 CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0); 2603 CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM); 2604 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0); 2605 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0); 2606 CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1); 2607 CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3); 2608 CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0); 2609 CU_ASSERT(ctrlr->vcprop.cc.raw == 0); 2610 CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0); 2611 CU_ASSERT(ctrlr->vcprop.csts.raw == 0); 2612 CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0); 2613 CU_ASSERT(ctrlr->dif_insert_or_strip == true); 2614 2615 ctrlr->in_destruct = true; 2616 nvmf_ctrlr_destruct(ctrlr); 2617 poll_threads(); 2618 CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs)); 2619 CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding)); 2620 } 2621 2622 static void 2623 test_nvmf_ctrlr_use_zcopy(void) 2624 { 2625 struct spdk_nvmf_subsystem subsystem = {}; 2626 struct spdk_nvmf_transport transport = {}; 2627 struct spdk_nvmf_request req = {}; 2628 struct spdk_nvmf_qpair qpair = {}; 2629 struct spdk_nvmf_ctrlr ctrlr = {}; 2630 union nvmf_h2c_msg cmd = {}; 2631 struct spdk_nvmf_ns ns = {}; 2632 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2633 struct spdk_bdev bdev = {}; 2634 struct spdk_nvmf_poll_group group = {}; 2635 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2636 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2637 struct spdk_io_channel io_ch = {}; 2638 int opc; 2639 2640 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2641 ns.bdev = &bdev; 2642 2643 subsystem.id = 0; 2644 subsystem.max_nsid = 1; 2645 subsys_ns[0] = &ns; 2646 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2647 2648 ctrlr.subsys = &subsystem; 2649 ctrlr.visible_ns = spdk_bit_array_create(1); 2650 spdk_bit_array_set(ctrlr.visible_ns, 0); 2651 2652 transport.opts.zcopy = true; 2653 2654 qpair.ctrlr = &ctrlr; 2655 qpair.group = &group; 2656 qpair.qid = 1; 2657 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2658 qpair.transport = &transport; 2659 2660 group.thread = spdk_get_thread(); 2661 group.num_sgroups = 1; 2662 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2663 sgroups.num_ns = 1; 2664 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2665 ns_info.channel = &io_ch; 2666 sgroups.ns_info = &ns_info; 2667 TAILQ_INIT(&sgroups.queued); 2668 group.sgroups = &sgroups; 2669 TAILQ_INIT(&qpair.outstanding); 2670 2671 req.qpair = &qpair; 2672 req.cmd = &cmd; 2673 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2674 2675 /* Admin queue */ 2676 qpair.qid = 0; 2677 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2678 qpair.qid = 1; 2679 2680 /* Invalid Opcodes */ 2681 for (opc = 0; opc <= 255; opc++) { 2682 cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc; 2683 if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) && 2684 (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) { 2685 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2686 } 2687 } 2688 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 2689 2690 /* Fused WRITE */ 2691 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2692 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2693 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE; 2694 2695 /* Non bdev */ 2696 cmd.nvme_cmd.nsid = 4; 2697 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2698 cmd.nvme_cmd.nsid = 1; 2699 2700 /* ZCOPY Not supported */ 2701 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2702 ns.zcopy = true; 2703 2704 /* ZCOPY disabled on transport level */ 2705 transport.opts.zcopy = false; 2706 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2707 transport.opts.zcopy = true; 2708 2709 /* Success */ 2710 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2711 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2712 2713 spdk_bit_array_free(&ctrlr.visible_ns); 2714 } 2715 2716 static void 2717 qpair_state_change_done(void *cb_arg, int status) 2718 { 2719 } 2720 2721 static void 2722 test_spdk_nvmf_request_zcopy_start(void) 2723 { 2724 struct spdk_nvmf_request req = {}; 2725 struct spdk_nvmf_qpair qpair = {}; 2726 struct spdk_nvmf_transport transport = {}; 2727 struct spdk_nvme_cmd cmd = {}; 2728 union nvmf_c2h_msg rsp = {}; 2729 struct spdk_nvmf_ctrlr ctrlr = {}; 2730 struct spdk_nvmf_subsystem subsystem = {}; 2731 struct spdk_nvmf_ns ns = {}; 2732 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2733 enum spdk_nvme_ana_state ana_state[1]; 2734 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2735 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2736 2737 struct spdk_nvmf_poll_group group = {}; 2738 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2739 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2740 struct spdk_io_channel io_ch = {}; 2741 2742 ns.bdev = &bdev; 2743 ns.zcopy = true; 2744 ns.anagrpid = 1; 2745 2746 subsystem.id = 0; 2747 subsystem.max_nsid = 1; 2748 subsys_ns[0] = &ns; 2749 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2750 2751 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2752 2753 /* Enable controller */ 2754 ctrlr.vcprop.cc.bits.en = 1; 2755 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2756 ctrlr.listener = &listener; 2757 ctrlr.visible_ns = spdk_bit_array_create(1); 2758 spdk_bit_array_set(ctrlr.visible_ns, 0); 2759 2760 transport.opts.zcopy = true; 2761 2762 group.thread = spdk_get_thread(); 2763 group.num_sgroups = 1; 2764 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2765 sgroups.num_ns = 1; 2766 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2767 ns_info.channel = &io_ch; 2768 sgroups.ns_info = &ns_info; 2769 TAILQ_INIT(&sgroups.queued); 2770 group.sgroups = &sgroups; 2771 TAILQ_INIT(&qpair.outstanding); 2772 2773 qpair.ctrlr = &ctrlr; 2774 qpair.group = &group; 2775 qpair.transport = &transport; 2776 qpair.qid = 1; 2777 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2778 2779 cmd.nsid = 1; 2780 2781 req.qpair = &qpair; 2782 req.cmd = (union nvmf_h2c_msg *)&cmd; 2783 req.rsp = &rsp; 2784 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2785 cmd.opc = SPDK_NVME_OPC_READ; 2786 2787 /* Fail because no controller */ 2788 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2789 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2790 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 2791 qpair.ctrlr = NULL; 2792 spdk_nvmf_request_zcopy_start(&req); 2793 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2794 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2795 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 2796 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2797 qpair.ctrlr = &ctrlr; 2798 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2799 2800 /* Fail because bad NSID */ 2801 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2802 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2803 cmd.nsid = 0; 2804 spdk_nvmf_request_zcopy_start(&req); 2805 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2806 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2807 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2808 cmd.nsid = 1; 2809 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2810 2811 /* Fail because bad Channel */ 2812 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2813 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2814 ns_info.channel = NULL; 2815 spdk_nvmf_request_zcopy_start(&req); 2816 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2817 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2818 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2819 ns_info.channel = &io_ch; 2820 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2821 2822 /* Queue the request because NSID is not active */ 2823 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2824 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2825 ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING; 2826 spdk_nvmf_request_zcopy_start(&req); 2827 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT); 2828 CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req); 2829 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2830 TAILQ_REMOVE(&sgroups.queued, &req, link); 2831 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2832 2833 /* Fail because QPair is not active */ 2834 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2835 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2836 qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING; 2837 qpair.state_cb = qpair_state_change_done; 2838 spdk_nvmf_request_zcopy_start(&req); 2839 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED); 2840 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2841 qpair.state_cb = NULL; 2842 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2843 2844 /* Fail because nvmf_bdev_ctrlr_zcopy_start fails */ 2845 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2846 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2847 cmd.cdw10 = bdev.blockcnt; /* SLBA: CDW10 and CDW11 */ 2848 cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */ 2849 req.length = (cmd.cdw12 + 1) * bdev.blocklen; 2850 spdk_nvmf_request_zcopy_start(&req); 2851 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2852 cmd.cdw10 = 0; 2853 cmd.cdw12 = 0; 2854 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2855 2856 /* Success */ 2857 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2858 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2859 spdk_nvmf_request_zcopy_start(&req); 2860 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2861 2862 spdk_bit_array_free(&ctrlr.visible_ns); 2863 } 2864 2865 static void 2866 test_zcopy_read(void) 2867 { 2868 struct spdk_nvmf_request req = {}; 2869 struct spdk_nvmf_qpair qpair = {}; 2870 struct spdk_nvmf_transport transport = {}; 2871 struct spdk_nvme_cmd cmd = {}; 2872 union nvmf_c2h_msg rsp = {}; 2873 struct spdk_nvmf_ctrlr ctrlr = {}; 2874 struct spdk_nvmf_subsystem subsystem = {}; 2875 struct spdk_nvmf_ns ns = {}; 2876 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2877 enum spdk_nvme_ana_state ana_state[1]; 2878 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2879 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2880 2881 struct spdk_nvmf_poll_group group = {}; 2882 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2883 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2884 struct spdk_io_channel io_ch = {}; 2885 2886 ns.bdev = &bdev; 2887 ns.zcopy = true; 2888 ns.anagrpid = 1; 2889 2890 subsystem.id = 0; 2891 subsystem.max_nsid = 1; 2892 subsys_ns[0] = &ns; 2893 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2894 2895 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2896 2897 /* Enable controller */ 2898 ctrlr.vcprop.cc.bits.en = 1; 2899 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2900 ctrlr.listener = &listener; 2901 ctrlr.visible_ns = spdk_bit_array_create(1); 2902 spdk_bit_array_set(ctrlr.visible_ns, 0); 2903 2904 transport.opts.zcopy = true; 2905 2906 group.thread = spdk_get_thread(); 2907 group.num_sgroups = 1; 2908 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2909 sgroups.num_ns = 1; 2910 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2911 ns_info.channel = &io_ch; 2912 sgroups.ns_info = &ns_info; 2913 TAILQ_INIT(&sgroups.queued); 2914 group.sgroups = &sgroups; 2915 TAILQ_INIT(&qpair.outstanding); 2916 2917 qpair.ctrlr = &ctrlr; 2918 qpair.group = &group; 2919 qpair.transport = &transport; 2920 qpair.qid = 1; 2921 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2922 2923 cmd.nsid = 1; 2924 2925 req.qpair = &qpair; 2926 req.cmd = (union nvmf_h2c_msg *)&cmd; 2927 req.rsp = &rsp; 2928 cmd.opc = SPDK_NVME_OPC_READ; 2929 2930 /* Prepare for zcopy */ 2931 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2932 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2933 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2934 CU_ASSERT(ns_info.io_outstanding == 0); 2935 2936 /* Perform the zcopy start */ 2937 spdk_nvmf_request_zcopy_start(&req); 2938 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2939 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read); 2940 CU_ASSERT(qpair.outstanding.tqh_first == &req); 2941 CU_ASSERT(ns_info.io_outstanding == 1); 2942 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2943 2944 /* Perform the zcopy end */ 2945 spdk_nvmf_request_zcopy_end(&req, false); 2946 CU_ASSERT(req.zcopy_bdev_io == NULL); 2947 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 2948 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2949 CU_ASSERT(ns_info.io_outstanding == 0); 2950 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2951 2952 spdk_bit_array_free(&ctrlr.visible_ns); 2953 } 2954 2955 static void 2956 test_zcopy_write(void) 2957 { 2958 struct spdk_nvmf_request req = {}; 2959 struct spdk_nvmf_qpair qpair = {}; 2960 struct spdk_nvmf_transport transport = {}; 2961 struct spdk_nvme_cmd cmd = {}; 2962 union nvmf_c2h_msg rsp = {}; 2963 struct spdk_nvmf_ctrlr ctrlr = {}; 2964 struct spdk_nvmf_subsystem subsystem = {}; 2965 struct spdk_nvmf_ns ns = {}; 2966 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2967 enum spdk_nvme_ana_state ana_state[1]; 2968 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2969 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2970 2971 struct spdk_nvmf_poll_group group = {}; 2972 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2973 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2974 struct spdk_io_channel io_ch = {}; 2975 2976 ns.bdev = &bdev; 2977 ns.zcopy = true; 2978 ns.anagrpid = 1; 2979 2980 subsystem.id = 0; 2981 subsystem.max_nsid = 1; 2982 subsys_ns[0] = &ns; 2983 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2984 2985 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2986 2987 /* Enable controller */ 2988 ctrlr.vcprop.cc.bits.en = 1; 2989 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2990 ctrlr.listener = &listener; 2991 ctrlr.visible_ns = spdk_bit_array_create(1); 2992 spdk_bit_array_set(ctrlr.visible_ns, 0); 2993 2994 transport.opts.zcopy = true; 2995 2996 group.thread = spdk_get_thread(); 2997 group.num_sgroups = 1; 2998 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2999 sgroups.num_ns = 1; 3000 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 3001 ns_info.channel = &io_ch; 3002 sgroups.ns_info = &ns_info; 3003 TAILQ_INIT(&sgroups.queued); 3004 group.sgroups = &sgroups; 3005 TAILQ_INIT(&qpair.outstanding); 3006 3007 qpair.ctrlr = &ctrlr; 3008 qpair.group = &group; 3009 qpair.transport = &transport; 3010 qpair.qid = 1; 3011 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 3012 3013 cmd.nsid = 1; 3014 3015 req.qpair = &qpair; 3016 req.cmd = (union nvmf_h2c_msg *)&cmd; 3017 req.rsp = &rsp; 3018 cmd.opc = SPDK_NVME_OPC_WRITE; 3019 3020 /* Prepare for zcopy */ 3021 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 3022 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 3023 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 3024 CU_ASSERT(ns_info.io_outstanding == 0); 3025 3026 /* Perform the zcopy start */ 3027 spdk_nvmf_request_zcopy_start(&req); 3028 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 3029 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write); 3030 CU_ASSERT(qpair.outstanding.tqh_first == &req); 3031 CU_ASSERT(ns_info.io_outstanding == 1); 3032 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 3033 3034 /* Perform the zcopy end */ 3035 spdk_nvmf_request_zcopy_end(&req, true); 3036 CU_ASSERT(req.zcopy_bdev_io == NULL); 3037 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 3038 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 3039 CU_ASSERT(ns_info.io_outstanding == 0); 3040 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 3041 3042 spdk_bit_array_free(&ctrlr.visible_ns); 3043 } 3044 3045 static void 3046 test_nvmf_property_set(void) 3047 { 3048 int rc; 3049 struct spdk_nvmf_request req = {}; 3050 struct spdk_nvmf_qpair qpair = {}; 3051 struct spdk_nvmf_ctrlr ctrlr = {}; 3052 union nvmf_h2c_msg cmd = {}; 3053 union nvmf_c2h_msg rsp = {}; 3054 3055 req.qpair = &qpair; 3056 qpair.ctrlr = &ctrlr; 3057 req.cmd = &cmd; 3058 req.rsp = &rsp; 3059 3060 /* Invalid parameters */ 3061 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 3062 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs); 3063 3064 rc = nvmf_property_set(&req); 3065 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3066 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 3067 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 3068 3069 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms); 3070 3071 rc = nvmf_property_get(&req); 3072 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3073 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 3074 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 3075 3076 /* Set cc with same property size */ 3077 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 3078 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc); 3079 3080 rc = nvmf_property_set(&req); 3081 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3082 3083 /* Emulate cc data */ 3084 ctrlr.vcprop.cc.raw = 0xDEADBEEF; 3085 3086 rc = nvmf_property_get(&req); 3087 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3088 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF); 3089 3090 /* Set asq with different property size */ 3091 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 3092 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 3093 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq); 3094 3095 rc = nvmf_property_set(&req); 3096 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3097 3098 /* Emulate asq data */ 3099 ctrlr.vcprop.asq = 0xAADDADBEEF; 3100 3101 rc = nvmf_property_get(&req); 3102 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3103 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF); 3104 } 3105 3106 static void 3107 test_nvmf_ctrlr_get_features_host_behavior_support(void) 3108 { 3109 int rc; 3110 struct spdk_nvmf_request req = {}; 3111 struct spdk_nvmf_qpair qpair = {}; 3112 struct spdk_nvmf_ctrlr ctrlr = {}; 3113 struct spdk_nvme_host_behavior behavior = {}; 3114 union nvmf_h2c_msg cmd = {}; 3115 union nvmf_c2h_msg rsp = {}; 3116 3117 qpair.ctrlr = &ctrlr; 3118 req.qpair = &qpair; 3119 req.cmd = &cmd; 3120 req.rsp = &rsp; 3121 3122 /* Invalid data */ 3123 req.length = sizeof(struct spdk_nvme_host_behavior); 3124 req.iovcnt = 0; 3125 3126 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3127 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3128 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3129 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3130 3131 /* Wrong structure length */ 3132 req.length = sizeof(struct spdk_nvme_host_behavior) - 1; 3133 SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length); 3134 3135 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3136 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3137 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3138 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3139 3140 /* Get Features Host Behavior Support Success */ 3141 req.length = sizeof(struct spdk_nvme_host_behavior); 3142 SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length); 3143 3144 ctrlr.acre_enabled = true; 3145 ctrlr.lbafee_enabled = true; 3146 behavior.acre = false; 3147 behavior.lbafee = false; 3148 3149 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3150 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3151 CU_ASSERT(behavior.acre == true); 3152 CU_ASSERT(behavior.lbafee == true); 3153 } 3154 3155 static void 3156 test_nvmf_ctrlr_set_features_host_behavior_support(void) 3157 { 3158 int rc; 3159 struct spdk_nvmf_request req = {}; 3160 struct spdk_nvmf_qpair qpair = {}; 3161 struct spdk_nvmf_ctrlr ctrlr = {}; 3162 struct spdk_nvme_host_behavior host_behavior = {}; 3163 union nvmf_h2c_msg cmd = {}; 3164 union nvmf_c2h_msg rsp = {}; 3165 3166 qpair.ctrlr = &ctrlr; 3167 req.qpair = &qpair; 3168 req.cmd = &cmd; 3169 req.rsp = &rsp; 3170 req.iov[0].iov_base = &host_behavior; 3171 req.iov[0].iov_len = sizeof(host_behavior); 3172 3173 /* Invalid iovcnt */ 3174 req.iovcnt = 0; 3175 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3176 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3177 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3178 3179 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3180 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3181 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3182 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3183 3184 /* Invalid iov_len */ 3185 req.iovcnt = 1; 3186 req.iov[0].iov_len = 0; 3187 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3188 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3189 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3190 3191 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3192 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3193 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3194 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3195 3196 /* acre is false but lbafee is true */ 3197 host_behavior.acre = 0; 3198 host_behavior.lbafee = 1; 3199 req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior); 3200 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3201 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3202 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3203 3204 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3205 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3206 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3207 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 3208 CU_ASSERT(ctrlr.acre_enabled == false); 3209 CU_ASSERT(ctrlr.lbafee_enabled == true); 3210 3211 /* acre is true but lbafee is false */ 3212 host_behavior.acre = 1; 3213 host_behavior.lbafee = 0; 3214 req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior); 3215 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3216 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3217 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3218 3219 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3220 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3221 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3222 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 3223 CU_ASSERT(ctrlr.acre_enabled == true); 3224 CU_ASSERT(ctrlr.lbafee_enabled == false); 3225 3226 /* Invalid acre */ 3227 host_behavior.acre = 2; 3228 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3229 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3230 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3231 3232 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3233 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3234 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3235 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3236 3237 /* Invalid lbafee */ 3238 host_behavior.lbafee = 3; 3239 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3240 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3241 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3242 3243 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3244 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3245 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3246 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3247 } 3248 3249 static void 3250 test_nvmf_ctrlr_ns_attachment(void) 3251 { 3252 struct spdk_nvmf_subsystem subsystem = {}; 3253 struct spdk_nvmf_ns ns1 = { 3254 .nsid = 1, 3255 .always_visible = false 3256 }; 3257 struct spdk_nvmf_ns ns3 = { 3258 .nsid = 3, 3259 .always_visible = false 3260 }; 3261 struct spdk_nvmf_ctrlr ctrlrA = { 3262 .subsys = &subsystem 3263 }; 3264 struct spdk_nvmf_ctrlr ctrlrB = { 3265 .subsys = &subsystem 3266 }; 3267 struct spdk_nvmf_host *host; 3268 uint32_t nsid; 3269 3270 subsystem.max_nsid = 3; 3271 subsystem.ns = calloc(subsystem.max_nsid, sizeof(subsystem.ns)); 3272 SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL); 3273 3274 /* nsid = 2 -> unallocated, nsid = 1,3 -> allocated */ 3275 subsystem.ns[0] = &ns1; 3276 subsystem.ns[2] = &ns3; 3277 3278 snprintf(ctrlrA.hostnqn, sizeof(ctrlrA.hostnqn), "nqn.2016-06.io.spdk:host1"); 3279 ctrlrA.visible_ns = spdk_bit_array_create(subsystem.max_nsid); 3280 SPDK_CU_ASSERT_FATAL(ctrlrA.visible_ns != NULL); 3281 snprintf(ctrlrB.hostnqn, sizeof(ctrlrB.hostnqn), "nqn.2016-06.io.spdk:host2"); 3282 ctrlrB.visible_ns = spdk_bit_array_create(subsystem.max_nsid); 3283 SPDK_CU_ASSERT_FATAL(ctrlrB.visible_ns != NULL); 3284 3285 /* Do not auto attach and no cold attach of any ctrlr */ 3286 nsid = 1; 3287 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL); 3288 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3289 nvmf_ctrlr_init_visible_ns(&ctrlrA); 3290 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3291 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3292 nsid = 3; 3293 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3294 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3295 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL); 3296 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3297 3298 /* Cold attach ctrlrA to namespace 1 */ 3299 nsid = 1; 3300 host = calloc(1, sizeof(*host)); 3301 SPDK_CU_ASSERT_FATAL(host != NULL); 3302 snprintf(host->nqn, sizeof(host->nqn), "%s", ctrlrA.hostnqn); 3303 TAILQ_INSERT_HEAD(&ns1.hosts, host, link); 3304 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host); 3305 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3306 nvmf_ctrlr_init_visible_ns(&ctrlrA); 3307 CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3308 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3309 nsid = 3; 3310 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3311 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3312 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host); 3313 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3314 3315 /* Detach ctrlrA from namespace 1 */ 3316 nsid = 1; 3317 spdk_bit_array_clear(ctrlrA.visible_ns, nsid - 1); 3318 TAILQ_REMOVE(&ns1.hosts, host, link); 3319 free(host); 3320 3321 /* Auto attach any ctrlr to namespace 2 */ 3322 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL); 3323 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3324 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3325 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3326 nsid = 3; 3327 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3328 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3329 ns1.always_visible = true; 3330 nvmf_ctrlr_init_visible_ns(&ctrlrA); 3331 nsid = 1; 3332 CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3333 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3334 nsid = 3; 3335 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3336 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3337 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL); 3338 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3339 nvmf_ctrlr_init_visible_ns(&ctrlrB); 3340 nsid = 1; 3341 CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3342 CU_ASSERT(spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3343 nsid = 3; 3344 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3345 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3346 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL); 3347 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3348 3349 free(ctrlrA.visible_ns); 3350 free(ctrlrB.visible_ns); 3351 free(subsystem.ns); 3352 } 3353 3354 static void 3355 test_nvmf_check_qpair_active(void) 3356 { 3357 union nvmf_c2h_msg rsp = {}; 3358 union nvmf_h2c_msg cmd = {}; 3359 struct spdk_nvmf_qpair qpair = { .outstanding = TAILQ_HEAD_INITIALIZER(qpair.outstanding) }; 3360 struct spdk_nvmf_request req = { .qpair = &qpair, .cmd = &cmd, .rsp = &rsp }; 3361 size_t i; 3362 3363 /* qpair is active */ 3364 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 3365 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 3366 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true); 3367 3368 /* qpair is connecting - CONNECT is allowed */ 3369 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 3370 cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 3371 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 3372 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true); 3373 3374 /* qpair is connecting - other commands are disallowed */ 3375 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 3376 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 3377 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false); 3378 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 3379 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 3380 3381 /* qpair is authenticating - AUTHENTICATION_SEND is allowed */ 3382 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 3383 cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND; 3384 qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING; 3385 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true); 3386 3387 /* qpair is authenticating - AUTHENTICATION_RECV is allowed */ 3388 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 3389 cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV; 3390 qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING; 3391 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true); 3392 3393 /* qpair is authenticating - other commands are disallowed */ 3394 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 3395 qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING; 3396 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false); 3397 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_COMMAND_SPECIFIC); 3398 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVMF_FABRIC_SC_AUTH_REQUIRED); 3399 3400 /* qpair is in one of the other states - all commands are disallowed */ 3401 int disallowed_states[] = { 3402 SPDK_NVMF_QPAIR_UNINITIALIZED, 3403 SPDK_NVMF_QPAIR_DEACTIVATING, 3404 SPDK_NVMF_QPAIR_ERROR, 3405 }; 3406 qpair.state_cb = qpair_state_change_done; 3407 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 3408 for (i = 0; i < SPDK_COUNTOF(disallowed_states); ++i) { 3409 qpair.state = disallowed_states[i]; 3410 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false); 3411 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 3412 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 3413 } 3414 } 3415 3416 int 3417 main(int argc, char **argv) 3418 { 3419 CU_pSuite suite = NULL; 3420 unsigned int num_failures; 3421 3422 CU_initialize_registry(); 3423 3424 suite = CU_add_suite("nvmf", NULL, NULL); 3425 CU_ADD_TEST(suite, test_get_log_page); 3426 CU_ADD_TEST(suite, test_process_fabrics_cmd); 3427 CU_ADD_TEST(suite, test_connect); 3428 CU_ADD_TEST(suite, test_get_ns_id_desc_list); 3429 CU_ADD_TEST(suite, test_identify_ns); 3430 CU_ADD_TEST(suite, test_identify_ns_iocs_specific); 3431 CU_ADD_TEST(suite, test_reservation_write_exclusive); 3432 CU_ADD_TEST(suite, test_reservation_exclusive_access); 3433 CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs); 3434 CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs); 3435 CU_ADD_TEST(suite, test_reservation_notification_log_page); 3436 CU_ADD_TEST(suite, test_get_dif_ctx); 3437 CU_ADD_TEST(suite, test_set_get_features); 3438 CU_ADD_TEST(suite, test_identify_ctrlr); 3439 CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific); 3440 CU_ADD_TEST(suite, test_custom_admin_cmd); 3441 CU_ADD_TEST(suite, test_fused_compare_and_write); 3442 CU_ADD_TEST(suite, test_multi_async_event_reqs); 3443 CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp); 3444 CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp); 3445 CU_ADD_TEST(suite, test_multi_async_events); 3446 CU_ADD_TEST(suite, test_rae); 3447 CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct); 3448 CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy); 3449 CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start); 3450 CU_ADD_TEST(suite, test_zcopy_read); 3451 CU_ADD_TEST(suite, test_zcopy_write); 3452 CU_ADD_TEST(suite, test_nvmf_property_set); 3453 CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support); 3454 CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support); 3455 CU_ADD_TEST(suite, test_nvmf_ctrlr_ns_attachment); 3456 CU_ADD_TEST(suite, test_nvmf_check_qpair_active); 3457 3458 allocate_threads(1); 3459 set_thread(0); 3460 3461 num_failures = spdk_ut_run_tests(argc, argv, NULL); 3462 CU_cleanup_registry(); 3463 3464 free_threads(); 3465 3466 return num_failures; 3467 } 3468