1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2016 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/bdev_zone.h" 8 #include "spdk/nvme_spec.h" 9 #include "spdk/stdinc.h" 10 11 #include "spdk_internal/cunit.h" 12 #include "spdk_internal/mock.h" 13 #include "thread/thread_internal.h" 14 15 #include "common/lib/ut_multithread.c" 16 #include "nvmf/ctrlr.c" 17 18 SPDK_LOG_REGISTER_COMPONENT(nvmf) 19 20 struct spdk_bdev { 21 int ut_mock; 22 uint64_t blockcnt; 23 uint32_t blocklen; 24 bool zoned; 25 uint32_t zone_size; 26 uint32_t max_open_zones; 27 uint32_t max_active_zones; 28 }; 29 30 #define MAX_OPEN_ZONES 12 31 #define MAX_ACTIVE_ZONES 34 32 #define ZONE_SIZE 56 33 34 const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn"; 35 const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn"; 36 37 static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL; 38 static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *) 39 0x8877665544332211UL; 40 static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL; 41 42 DEFINE_STUB(spdk_nvmf_tgt_find_subsystem, 43 struct spdk_nvmf_subsystem *, 44 (struct spdk_nvmf_tgt *tgt, const char *subnqn), 45 NULL); 46 47 DEFINE_STUB(spdk_nvmf_poll_group_create, 48 struct spdk_nvmf_poll_group *, 49 (struct spdk_nvmf_tgt *tgt), 50 NULL); 51 52 DEFINE_STUB(spdk_nvmf_subsystem_get_sn, 53 const char *, 54 (const struct spdk_nvmf_subsystem *subsystem), 55 subsystem_default_sn); 56 57 DEFINE_STUB(spdk_nvmf_subsystem_get_mn, 58 const char *, 59 (const struct spdk_nvmf_subsystem *subsystem), 60 subsystem_default_mn); 61 62 DEFINE_STUB(spdk_nvmf_subsystem_host_allowed, 63 bool, 64 (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn), 65 true); 66 67 DEFINE_STUB(nvmf_subsystem_add_ctrlr, 68 int, 69 (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr), 70 0); 71 72 DEFINE_STUB(nvmf_subsystem_get_ctrlr, 73 struct spdk_nvmf_ctrlr *, 74 (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), 75 NULL); 76 77 DEFINE_STUB(nvmf_ctrlr_dsm_supported, 78 bool, 79 (struct spdk_nvmf_ctrlr *ctrlr), 80 false); 81 82 DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported, 83 bool, 84 (struct spdk_nvmf_ctrlr *ctrlr), 85 false); 86 87 DEFINE_STUB(nvmf_ctrlr_copy_supported, 88 bool, 89 (struct spdk_nvmf_ctrlr *ctrlr), 90 false); 91 92 DEFINE_STUB_V(nvmf_get_discovery_log_page, 93 (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov, 94 uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid)); 95 96 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, 97 int, 98 (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 99 0); 100 101 DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed, 102 bool, 103 (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid), 104 true); 105 106 DEFINE_STUB(nvmf_subsystem_find_listener, 107 struct spdk_nvmf_subsystem_listener *, 108 (struct spdk_nvmf_subsystem *subsystem, 109 const struct spdk_nvme_transport_id *trid), 110 (void *)0x1); 111 112 DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd, 113 int, 114 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 115 struct spdk_nvmf_request *req), 116 0); 117 118 DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd, 119 int, 120 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 121 struct spdk_nvmf_request *req), 122 0); 123 124 DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd, 125 int, 126 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 127 struct spdk_nvmf_request *req), 128 0); 129 130 DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd, 131 int, 132 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 133 struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req), 134 0); 135 136 DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd, 137 int, 138 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 139 struct spdk_nvmf_request *req), 140 0); 141 142 DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd, 143 int, 144 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 145 struct spdk_nvmf_request *req), 146 0); 147 148 DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd, 149 int, 150 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 151 struct spdk_nvmf_request *req), 152 0); 153 154 DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd, 155 int, 156 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 157 struct spdk_nvmf_request *req), 158 0); 159 160 DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io, 161 int, 162 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 163 struct spdk_nvmf_request *req), 164 0); 165 166 DEFINE_STUB(nvmf_transport_req_complete, 167 int, 168 (struct spdk_nvmf_request *req), 169 0); 170 171 DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx)); 172 173 DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool, 174 (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, 175 struct spdk_dif_ctx *dif_ctx), 176 true); 177 178 DEFINE_STUB_V(nvmf_transport_qpair_abort_request, 179 (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req)); 180 181 DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd)); 182 DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl)); 183 184 DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem, 185 struct spdk_nvmf_ctrlr *ctrlr)); 186 187 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd, 188 int, 189 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 190 struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort), 191 0); 192 193 DEFINE_STUB(nvmf_transport_req_free, 194 int, 195 (struct spdk_nvmf_request *req), 196 0); 197 198 DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin, 199 int, 200 (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 201 struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn), 202 0); 203 DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 204 spdk_bdev_io_completion_cb cb, void *cb_arg), 0); 205 DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io)); 206 207 DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev), 208 MAX_ACTIVE_ZONES); 209 DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES); 210 DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE); 211 DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false); 212 213 DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t, 214 (const struct spdk_nvme_ns_data *nsdata), 0); 215 216 DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false); 217 DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n), 218 false); 219 DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0); 220 DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r), 221 SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 222 223 DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *, 224 (const struct spdk_nvmf_subsystem *subsystem), NULL); 225 226 void 227 nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, enum spdk_nvmf_qpair_state state) 228 { 229 qpair->state = state; 230 } 231 232 int 233 spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair) 234 { 235 return 0; 236 } 237 238 void 239 nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata, 240 bool dif_insert_or_strip) 241 { 242 uint64_t num_blocks; 243 244 SPDK_CU_ASSERT_FATAL(ns->bdev != NULL); 245 num_blocks = ns->bdev->blockcnt; 246 nsdata->nsze = num_blocks; 247 nsdata->ncap = num_blocks; 248 nsdata->nuse = num_blocks; 249 nsdata->nlbaf = 0; 250 nsdata->flbas.format = 0; 251 nsdata->flbas.msb_format = 0; 252 nsdata->lbaf[0].lbads = spdk_u32log2(512); 253 } 254 255 struct spdk_nvmf_ns * 256 spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem) 257 { 258 SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL); 259 return subsystem->ns[0]; 260 } 261 262 struct spdk_nvmf_ns * 263 spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, 264 struct spdk_nvmf_ns *prev_ns) 265 { 266 uint32_t nsid; 267 268 SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL); 269 nsid = prev_ns->nsid; 270 271 if (nsid >= subsystem->max_nsid) { 272 return NULL; 273 } 274 for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) { 275 if (subsystem->ns[nsid - 1]) { 276 return subsystem->ns[nsid - 1]; 277 } 278 } 279 return NULL; 280 } 281 282 bool 283 nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev) 284 { 285 return true; 286 } 287 288 int 289 nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev, 290 struct spdk_bdev_desc *desc, 291 struct spdk_io_channel *ch, 292 struct spdk_nvmf_request *req) 293 { 294 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 295 uint64_t start_lba; 296 uint64_t num_blocks; 297 298 start_lba = from_le64(&req->cmd->nvme_cmd.cdw10); 299 num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1; 300 301 if ((start_lba + num_blocks) > bdev->blockcnt) { 302 rsp->status.sct = SPDK_NVME_SCT_GENERIC; 303 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 304 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 305 } 306 307 if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) { 308 req->zcopy_bdev_io = zcopy_start_bdev_io_write; 309 } else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) { 310 req->zcopy_bdev_io = zcopy_start_bdev_io_read; 311 } else { 312 req->zcopy_bdev_io = zcopy_start_bdev_io_fail; 313 } 314 315 316 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 317 } 318 319 void 320 nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit) 321 { 322 req->zcopy_bdev_io = NULL; 323 spdk_nvmf_request_complete(req); 324 } 325 326 bool 327 nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns) 328 { 329 return ns->ptpl_file != NULL; 330 } 331 332 static void 333 test_get_log_page(void) 334 { 335 struct spdk_nvmf_subsystem subsystem = {}; 336 struct spdk_nvmf_request req = {}; 337 struct spdk_nvmf_qpair qpair = {}; 338 struct spdk_nvmf_ctrlr ctrlr = {}; 339 union nvmf_h2c_msg cmd = {}; 340 union nvmf_c2h_msg rsp = {}; 341 char data[4096]; 342 343 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 344 345 ctrlr.subsys = &subsystem; 346 347 qpair.ctrlr = &ctrlr; 348 349 req.qpair = &qpair; 350 req.cmd = &cmd; 351 req.rsp = &rsp; 352 req.length = sizeof(data); 353 SPDK_IOV_ONE(req.iov, &req.iovcnt, &data, req.length); 354 355 /* Get Log Page - all valid */ 356 memset(&cmd, 0, sizeof(cmd)); 357 memset(&rsp, 0, sizeof(rsp)); 358 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 359 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 360 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 361 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 362 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 363 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 364 365 /* Get Log Page with invalid log ID */ 366 memset(&cmd, 0, sizeof(cmd)); 367 memset(&rsp, 0, sizeof(rsp)); 368 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 369 cmd.nvme_cmd.cdw10 = 0; 370 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 371 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 372 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 373 374 /* Get Log Page with invalid offset (not dword aligned) */ 375 memset(&cmd, 0, sizeof(cmd)); 376 memset(&rsp, 0, sizeof(rsp)); 377 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 378 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 379 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 380 cmd.nvme_cmd.cdw12 = 2; 381 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 382 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 383 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 384 385 /* Get Log Page without data buffer */ 386 memset(&cmd, 0, sizeof(cmd)); 387 memset(&rsp, 0, sizeof(rsp)); 388 req.iovcnt = 0; 389 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 390 cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR; 391 cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length); 392 CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 393 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 394 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 395 } 396 397 static void 398 test_process_fabrics_cmd(void) 399 { 400 struct spdk_nvmf_request req = {}; 401 bool ret; 402 struct spdk_nvmf_qpair req_qpair = {}; 403 union nvmf_h2c_msg req_cmd = {}; 404 union nvmf_c2h_msg req_rsp = {}; 405 406 TAILQ_INIT(&req_qpair.outstanding); 407 req_qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 408 req.qpair = &req_qpair; 409 req.cmd = &req_cmd; 410 req.rsp = &req_rsp; 411 req.qpair->ctrlr = NULL; 412 413 /* No ctrlr and invalid command check */ 414 req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET; 415 ret = nvmf_check_qpair_active(&req); 416 CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 417 CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 418 CU_ASSERT(ret == false); 419 } 420 421 static bool 422 nvme_status_success(const struct spdk_nvme_status *status) 423 { 424 return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS; 425 } 426 427 static void 428 test_connect(void) 429 { 430 struct spdk_nvmf_fabric_connect_data connect_data; 431 struct spdk_nvmf_poll_group group; 432 struct spdk_nvmf_subsystem_poll_group *sgroups; 433 struct spdk_nvmf_transport transport; 434 struct spdk_nvmf_transport_ops tops = {}; 435 struct spdk_nvmf_subsystem subsystem; 436 struct spdk_nvmf_ns *ns_arr[1] = { NULL }; 437 struct spdk_nvmf_request req; 438 struct spdk_nvmf_qpair admin_qpair; 439 struct spdk_nvmf_qpair qpair; 440 struct spdk_nvmf_ctrlr ctrlr; 441 struct spdk_nvmf_tgt tgt; 442 union nvmf_h2c_msg cmd; 443 union nvmf_c2h_msg rsp; 444 const uint8_t hostid[16] = { 445 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 446 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 447 }; 448 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 449 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 450 int rc; 451 452 memset(&group, 0, sizeof(group)); 453 group.thread = spdk_get_thread(); 454 455 memset(&ctrlr, 0, sizeof(ctrlr)); 456 ctrlr.subsys = &subsystem; 457 ctrlr.qpair_mask = spdk_bit_array_create(3); 458 SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL); 459 ctrlr.vcprop.cc.bits.en = 1; 460 ctrlr.vcprop.cc.bits.iosqes = 6; 461 ctrlr.vcprop.cc.bits.iocqes = 4; 462 463 memset(&admin_qpair, 0, sizeof(admin_qpair)); 464 admin_qpair.group = &group; 465 admin_qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 466 467 memset(&tgt, 0, sizeof(tgt)); 468 memset(&transport, 0, sizeof(transport)); 469 transport.ops = &tops; 470 transport.opts.max_aq_depth = 32; 471 transport.opts.max_queue_depth = 64; 472 transport.opts.max_qpairs_per_ctrlr = 3; 473 transport.tgt = &tgt; 474 475 memset(&qpair, 0, sizeof(qpair)); 476 qpair.transport = &transport; 477 qpair.group = &group; 478 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 479 TAILQ_INIT(&qpair.outstanding); 480 481 memset(&connect_data, 0, sizeof(connect_data)); 482 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 483 connect_data.cntlid = 0xFFFF; 484 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 485 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 486 487 memset(&subsystem, 0, sizeof(subsystem)); 488 subsystem.thread = spdk_get_thread(); 489 subsystem.id = 1; 490 TAILQ_INIT(&subsystem.ctrlrs); 491 subsystem.tgt = &tgt; 492 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 493 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 494 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 495 subsystem.ns = ns_arr; 496 subsystem.max_nsid = 1; 497 498 sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group)); 499 group.sgroups = sgroups; 500 501 memset(&cmd, 0, sizeof(cmd)); 502 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 503 cmd.connect_cmd.cid = 1; 504 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 505 cmd.connect_cmd.recfmt = 0; 506 cmd.connect_cmd.qid = 0; 507 cmd.connect_cmd.sqsize = 31; 508 cmd.connect_cmd.cattr = 0; 509 cmd.connect_cmd.kato = 120000; 510 511 memset(&req, 0, sizeof(req)); 512 req.qpair = &qpair; 513 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 514 req.length = sizeof(connect_data); 515 SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length); 516 req.cmd = &cmd; 517 req.rsp = &rsp; 518 519 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 520 MOCK_SET(spdk_nvmf_poll_group_create, &group); 521 522 /* Valid admin connect command */ 523 memset(&rsp, 0, sizeof(rsp)); 524 sgroups[subsystem.id].mgmt_io_outstanding++; 525 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 526 rc = nvmf_ctrlr_cmd_connect(&req); 527 poll_threads(); 528 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 529 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 530 CU_ASSERT(qpair.ctrlr != NULL); 531 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 532 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 533 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 534 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 535 free(qpair.ctrlr->visible_ns); 536 free(qpair.ctrlr); 537 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 538 qpair.ctrlr = NULL; 539 540 /* Valid admin connect command with kato = 0 */ 541 cmd.connect_cmd.kato = 0; 542 memset(&rsp, 0, sizeof(rsp)); 543 sgroups[subsystem.id].mgmt_io_outstanding++; 544 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 545 rc = nvmf_ctrlr_cmd_connect(&req); 546 poll_threads(); 547 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 548 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 549 CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL); 550 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 551 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 552 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 553 free(qpair.ctrlr->visible_ns); 554 free(qpair.ctrlr); 555 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 556 qpair.ctrlr = NULL; 557 cmd.connect_cmd.kato = 120000; 558 559 /* Invalid data length */ 560 memset(&rsp, 0, sizeof(rsp)); 561 req.length = sizeof(connect_data) - 1; 562 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 563 rc = nvmf_ctrlr_cmd_connect(&req); 564 poll_threads(); 565 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 566 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 567 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 568 CU_ASSERT(qpair.ctrlr == NULL); 569 req.length = sizeof(connect_data); 570 571 /* Invalid recfmt */ 572 memset(&rsp, 0, sizeof(rsp)); 573 cmd.connect_cmd.recfmt = 1234; 574 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 575 rc = nvmf_ctrlr_cmd_connect(&req); 576 poll_threads(); 577 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 578 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 579 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT); 580 CU_ASSERT(qpair.ctrlr == NULL); 581 cmd.connect_cmd.recfmt = 0; 582 583 /* Subsystem not found */ 584 memset(&rsp, 0, sizeof(rsp)); 585 MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL); 586 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 587 rc = nvmf_ctrlr_cmd_connect(&req); 588 poll_threads(); 589 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 590 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 591 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 592 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 593 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256); 594 CU_ASSERT(qpair.ctrlr == NULL); 595 MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem); 596 597 /* Unterminated hostnqn */ 598 memset(&rsp, 0, sizeof(rsp)); 599 memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn)); 600 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 601 rc = nvmf_ctrlr_cmd_connect(&req); 602 poll_threads(); 603 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 604 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 605 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 606 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 607 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512); 608 CU_ASSERT(qpair.ctrlr == NULL); 609 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 610 611 /* Host not allowed */ 612 memset(&rsp, 0, sizeof(rsp)); 613 MOCK_SET(spdk_nvmf_subsystem_host_allowed, false); 614 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 615 rc = nvmf_ctrlr_cmd_connect(&req); 616 poll_threads(); 617 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 618 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 619 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST); 620 CU_ASSERT(qpair.ctrlr == NULL); 621 MOCK_SET(spdk_nvmf_subsystem_host_allowed, true); 622 623 /* Invalid sqsize == 0 */ 624 memset(&rsp, 0, sizeof(rsp)); 625 cmd.connect_cmd.sqsize = 0; 626 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 627 rc = nvmf_ctrlr_cmd_connect(&req); 628 poll_threads(); 629 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 630 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 631 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 632 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 633 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 634 CU_ASSERT(qpair.ctrlr == NULL); 635 cmd.connect_cmd.sqsize = 31; 636 637 /* Invalid admin sqsize > max_aq_depth */ 638 memset(&rsp, 0, sizeof(rsp)); 639 cmd.connect_cmd.sqsize = 32; 640 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 641 rc = nvmf_ctrlr_cmd_connect(&req); 642 poll_threads(); 643 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 644 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 645 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 646 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 647 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 648 CU_ASSERT(qpair.ctrlr == NULL); 649 cmd.connect_cmd.sqsize = 31; 650 651 /* Invalid I/O sqsize > max_queue_depth */ 652 memset(&rsp, 0, sizeof(rsp)); 653 cmd.connect_cmd.qid = 1; 654 cmd.connect_cmd.sqsize = 64; 655 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 656 rc = nvmf_ctrlr_cmd_connect(&req); 657 poll_threads(); 658 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 659 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 660 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 661 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 662 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44); 663 CU_ASSERT(qpair.ctrlr == NULL); 664 cmd.connect_cmd.qid = 0; 665 cmd.connect_cmd.sqsize = 31; 666 667 /* Invalid cntlid for admin queue */ 668 memset(&rsp, 0, sizeof(rsp)); 669 connect_data.cntlid = 0x1234; 670 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 671 rc = nvmf_ctrlr_cmd_connect(&req); 672 poll_threads(); 673 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 674 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 675 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 676 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 677 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 678 CU_ASSERT(qpair.ctrlr == NULL); 679 connect_data.cntlid = 0xFFFF; 680 681 ctrlr.admin_qpair = &admin_qpair; 682 ctrlr.subsys = &subsystem; 683 684 /* Valid I/O queue connect command */ 685 memset(&rsp, 0, sizeof(rsp)); 686 MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr); 687 cmd.connect_cmd.qid = 1; 688 cmd.connect_cmd.sqsize = 63; 689 sgroups[subsystem.id].mgmt_io_outstanding++; 690 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 691 rc = nvmf_ctrlr_cmd_connect(&req); 692 poll_threads(); 693 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 694 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 695 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 696 CU_ASSERT(qpair.ctrlr == &ctrlr); 697 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 698 qpair.ctrlr = NULL; 699 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 700 cmd.connect_cmd.sqsize = 31; 701 702 /* Non-existent controller */ 703 memset(&rsp, 0, sizeof(rsp)); 704 MOCK_SET(nvmf_subsystem_get_ctrlr, NULL); 705 sgroups[subsystem.id].mgmt_io_outstanding++; 706 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 707 rc = nvmf_ctrlr_cmd_connect(&req); 708 poll_threads(); 709 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 710 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 711 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 712 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1); 713 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16); 714 CU_ASSERT(qpair.ctrlr == NULL); 715 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 716 MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr); 717 718 /* I/O connect to discovery controller */ 719 memset(&rsp, 0, sizeof(rsp)); 720 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT; 721 MOCK_SET(spdk_nvmf_subsystem_is_discovery, true); 722 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 723 sgroups[subsystem.id].mgmt_io_outstanding++; 724 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 725 rc = nvmf_ctrlr_cmd_connect(&req); 726 poll_threads(); 727 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 728 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 729 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 730 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 731 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 732 CU_ASSERT(qpair.ctrlr == NULL); 733 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 734 735 /* I/O connect to discovery controller with keep-alive-timeout != 0 */ 736 cmd.connect_cmd.qid = 0; 737 cmd.connect_cmd.kato = 120000; 738 memset(&rsp, 0, sizeof(rsp)); 739 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT; 740 MOCK_SET(spdk_nvmf_subsystem_is_discovery, true); 741 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 742 sgroups[subsystem.id].mgmt_io_outstanding++; 743 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 744 rc = nvmf_ctrlr_cmd_connect(&req); 745 poll_threads(); 746 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 747 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 748 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 749 CU_ASSERT(qpair.ctrlr != NULL); 750 CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL); 751 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 752 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 753 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 754 free(qpair.ctrlr->visible_ns); 755 free(qpair.ctrlr); 756 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 757 qpair.ctrlr = NULL; 758 759 /* I/O connect to discovery controller with keep-alive-timeout == 0. 760 * Then, a fixed timeout value is set to keep-alive-timeout. 761 */ 762 cmd.connect_cmd.kato = 0; 763 memset(&rsp, 0, sizeof(rsp)); 764 subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT; 765 MOCK_SET(spdk_nvmf_subsystem_is_discovery, true); 766 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 767 sgroups[subsystem.id].mgmt_io_outstanding++; 768 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 769 rc = nvmf_ctrlr_cmd_connect(&req); 770 poll_threads(); 771 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 772 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 773 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 774 CU_ASSERT(qpair.ctrlr != NULL); 775 CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL); 776 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 777 nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr); 778 spdk_bit_array_free(&qpair.ctrlr->qpair_mask); 779 free(qpair.ctrlr->visible_ns); 780 free(qpair.ctrlr); 781 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 782 qpair.ctrlr = NULL; 783 cmd.connect_cmd.qid = 1; 784 cmd.connect_cmd.kato = 120000; 785 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 786 MOCK_SET(spdk_nvmf_subsystem_is_discovery, false); 787 788 /* I/O connect to disabled controller */ 789 memset(&rsp, 0, sizeof(rsp)); 790 ctrlr.vcprop.cc.bits.en = 0; 791 sgroups[subsystem.id].mgmt_io_outstanding++; 792 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 793 rc = nvmf_ctrlr_cmd_connect(&req); 794 poll_threads(); 795 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 796 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 797 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 798 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 799 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 800 CU_ASSERT(qpair.ctrlr == NULL); 801 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 802 ctrlr.vcprop.cc.bits.en = 1; 803 804 /* I/O connect with invalid IOSQES */ 805 memset(&rsp, 0, sizeof(rsp)); 806 ctrlr.vcprop.cc.bits.iosqes = 3; 807 sgroups[subsystem.id].mgmt_io_outstanding++; 808 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 809 rc = nvmf_ctrlr_cmd_connect(&req); 810 poll_threads(); 811 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 812 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 813 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 814 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 815 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 816 CU_ASSERT(qpair.ctrlr == NULL); 817 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 818 ctrlr.vcprop.cc.bits.iosqes = 6; 819 820 /* I/O connect with invalid IOCQES */ 821 memset(&rsp, 0, sizeof(rsp)); 822 ctrlr.vcprop.cc.bits.iocqes = 3; 823 sgroups[subsystem.id].mgmt_io_outstanding++; 824 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 825 rc = nvmf_ctrlr_cmd_connect(&req); 826 poll_threads(); 827 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 828 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 829 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 830 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0); 831 CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42); 832 CU_ASSERT(qpair.ctrlr == NULL); 833 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 834 ctrlr.vcprop.cc.bits.iocqes = 4; 835 836 /* I/O connect with qid that is too large */ 837 memset(&rsp, 0, sizeof(rsp)); 838 cmd.connect_cmd.qid = 3; 839 sgroups[subsystem.id].mgmt_io_outstanding++; 840 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 841 rc = nvmf_ctrlr_cmd_connect(&req); 842 poll_threads(); 843 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 844 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 845 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 846 CU_ASSERT(qpair.ctrlr == NULL); 847 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 848 849 /* I/O connect with duplicate queue ID */ 850 memset(&rsp, 0, sizeof(rsp)); 851 spdk_bit_array_set(ctrlr.qpair_mask, 1); 852 cmd.connect_cmd.qid = 1; 853 sgroups[subsystem.id].mgmt_io_outstanding++; 854 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 855 rc = nvmf_ctrlr_cmd_connect(&req); 856 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 857 poll_threads(); 858 /* First time, it will detect duplicate QID and schedule a retry. So for 859 * now we should expect the response to still be all zeroes. 860 */ 861 CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp))); 862 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1); 863 864 /* Now advance the clock, so that the retry poller executes. */ 865 spdk_delay_us(DUPLICATE_QID_RETRY_US * 2); 866 poll_threads(); 867 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 868 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER); 869 CU_ASSERT(qpair.ctrlr == NULL); 870 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 871 872 /* I/O connect with temporarily duplicate queue ID. This covers race 873 * where qpair_mask bit may not yet be cleared, even though initiator 874 * has closed the connection. See issue #2955. */ 875 memset(&rsp, 0, sizeof(rsp)); 876 sgroups[subsystem.id].mgmt_io_outstanding++; 877 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 878 rc = nvmf_ctrlr_cmd_connect(&req); 879 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 880 poll_threads(); 881 /* First time, it will detect duplicate QID and schedule a retry. So for 882 * now we should expect the response to still be all zeroes. 883 */ 884 CU_ASSERT(spdk_mem_all_zero(&rsp, sizeof(rsp))); 885 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 1); 886 887 /* Now advance the clock, so that the retry poller executes. */ 888 spdk_bit_array_clear(ctrlr.qpair_mask, 1); 889 spdk_delay_us(DUPLICATE_QID_RETRY_US * 2); 890 poll_threads(); 891 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 892 CU_ASSERT(qpair.state == SPDK_NVMF_QPAIR_ENABLED); 893 CU_ASSERT(qpair.ctrlr == &ctrlr); 894 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 895 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 896 qpair.ctrlr = NULL; 897 898 /* I/O connect when admin qpair is being destroyed */ 899 admin_qpair.group = NULL; 900 admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING; 901 memset(&rsp, 0, sizeof(rsp)); 902 sgroups[subsystem.id].mgmt_io_outstanding++; 903 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 904 rc = nvmf_ctrlr_cmd_connect(&req); 905 poll_threads(); 906 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 907 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 908 CU_ASSERT(qpair.ctrlr == NULL); 909 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 910 admin_qpair.group = &group; 911 admin_qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 912 913 /* I/O connect when admin qpair was destroyed */ 914 ctrlr.admin_qpair = NULL; 915 memset(&rsp, 0, sizeof(rsp)); 916 sgroups[subsystem.id].mgmt_io_outstanding++; 917 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 918 rc = nvmf_ctrlr_cmd_connect(&req); 919 poll_threads(); 920 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 921 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 922 CU_ASSERT(qpair.ctrlr == NULL); 923 CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0); 924 ctrlr.admin_qpair = &admin_qpair; 925 926 /* Clean up globals */ 927 MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem); 928 MOCK_CLEAR(spdk_nvmf_poll_group_create); 929 930 spdk_bit_array_free(&ctrlr.qpair_mask); 931 free(sgroups); 932 } 933 934 static void 935 test_get_ns_id_desc_list(void) 936 { 937 struct spdk_nvmf_subsystem subsystem; 938 struct spdk_nvmf_qpair qpair; 939 struct spdk_nvmf_ctrlr ctrlr; 940 struct spdk_nvmf_request req; 941 struct spdk_nvmf_ns *ns_ptrs[1]; 942 struct spdk_nvmf_ns ns; 943 union nvmf_h2c_msg cmd; 944 union nvmf_c2h_msg rsp; 945 struct spdk_bdev bdev; 946 uint8_t buf[4096]; 947 948 memset(&subsystem, 0, sizeof(subsystem)); 949 ns_ptrs[0] = &ns; 950 subsystem.ns = ns_ptrs; 951 subsystem.max_nsid = 1; 952 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 953 954 memset(&ns, 0, sizeof(ns)); 955 ns.opts.nsid = 1; 956 ns.bdev = &bdev; 957 958 memset(&qpair, 0, sizeof(qpair)); 959 qpair.ctrlr = &ctrlr; 960 961 memset(&ctrlr, 0, sizeof(ctrlr)); 962 ctrlr.subsys = &subsystem; 963 ctrlr.vcprop.cc.bits.en = 1; 964 ctrlr.thread = spdk_get_thread(); 965 ctrlr.visible_ns = spdk_bit_array_create(1); 966 967 memset(&req, 0, sizeof(req)); 968 req.qpair = &qpair; 969 req.cmd = &cmd; 970 req.rsp = &rsp; 971 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 972 req.length = sizeof(buf); 973 SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length); 974 975 memset(&cmd, 0, sizeof(cmd)); 976 cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY; 977 cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST; 978 979 /* Invalid NSID */ 980 cmd.nvme_cmd.nsid = 0; 981 memset(&rsp, 0, sizeof(rsp)); 982 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 983 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 984 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 985 986 /* Valid NSID, but ns is inactive */ 987 spdk_bit_array_clear(ctrlr.visible_ns, 0); 988 cmd.nvme_cmd.nsid = 1; 989 memset(&rsp, 0, sizeof(rsp)); 990 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 991 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 992 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 993 994 /* Valid NSID, but ns has no IDs defined */ 995 spdk_bit_array_set(ctrlr.visible_ns, 0); 996 cmd.nvme_cmd.nsid = 1; 997 memset(&rsp, 0, sizeof(rsp)); 998 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 999 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1000 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1001 CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf))); 1002 1003 /* Valid NSID, only EUI64 defined */ 1004 ns.opts.eui64[0] = 0x11; 1005 ns.opts.eui64[7] = 0xFF; 1006 memset(&rsp, 0, sizeof(rsp)); 1007 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1008 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1009 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1010 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 1011 CU_ASSERT(buf[1] == 8); 1012 CU_ASSERT(buf[4] == 0x11); 1013 CU_ASSERT(buf[11] == 0xFF); 1014 CU_ASSERT(buf[13] == 0); 1015 1016 /* Valid NSID, only NGUID defined */ 1017 memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64)); 1018 ns.opts.nguid[0] = 0x22; 1019 ns.opts.nguid[15] = 0xEE; 1020 memset(&rsp, 0, sizeof(rsp)); 1021 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1022 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1023 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1024 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID); 1025 CU_ASSERT(buf[1] == 16); 1026 CU_ASSERT(buf[4] == 0x22); 1027 CU_ASSERT(buf[19] == 0xEE); 1028 CU_ASSERT(buf[21] == 0); 1029 1030 /* Valid NSID, both EUI64 and NGUID defined */ 1031 ns.opts.eui64[0] = 0x11; 1032 ns.opts.eui64[7] = 0xFF; 1033 ns.opts.nguid[0] = 0x22; 1034 ns.opts.nguid[15] = 0xEE; 1035 memset(&rsp, 0, sizeof(rsp)); 1036 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1037 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1038 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1039 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 1040 CU_ASSERT(buf[1] == 8); 1041 CU_ASSERT(buf[4] == 0x11); 1042 CU_ASSERT(buf[11] == 0xFF); 1043 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 1044 CU_ASSERT(buf[13] == 16); 1045 CU_ASSERT(buf[16] == 0x22); 1046 CU_ASSERT(buf[31] == 0xEE); 1047 CU_ASSERT(buf[33] == 0); 1048 1049 /* Valid NSID, EUI64, NGUID, and UUID defined */ 1050 ns.opts.eui64[0] = 0x11; 1051 ns.opts.eui64[7] = 0xFF; 1052 ns.opts.nguid[0] = 0x22; 1053 ns.opts.nguid[15] = 0xEE; 1054 ns.opts.uuid.u.raw[0] = 0x33; 1055 ns.opts.uuid.u.raw[15] = 0xDD; 1056 memset(&rsp, 0, sizeof(rsp)); 1057 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1058 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1059 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1060 CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64); 1061 CU_ASSERT(buf[1] == 8); 1062 CU_ASSERT(buf[4] == 0x11); 1063 CU_ASSERT(buf[11] == 0xFF); 1064 CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID); 1065 CU_ASSERT(buf[13] == 16); 1066 CU_ASSERT(buf[16] == 0x22); 1067 CU_ASSERT(buf[31] == 0xEE); 1068 CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID); 1069 CU_ASSERT(buf[33] == 16); 1070 CU_ASSERT(buf[36] == 0x33); 1071 CU_ASSERT(buf[51] == 0xDD); 1072 CU_ASSERT(buf[53] == 0); 1073 1074 spdk_bit_array_free(&ctrlr.visible_ns); 1075 } 1076 1077 static void 1078 test_identify_ns(void) 1079 { 1080 struct spdk_nvmf_subsystem subsystem = {}; 1081 struct spdk_nvmf_transport transport = {}; 1082 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 1083 struct spdk_nvmf_ctrlr ctrlr = { 1084 .subsys = &subsystem, 1085 .admin_qpair = &admin_qpair, 1086 }; 1087 struct spdk_nvme_cmd cmd = {}; 1088 struct spdk_nvme_cpl rsp = {}; 1089 struct spdk_nvme_ns_data nsdata = {}; 1090 struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}}; 1091 struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}}; 1092 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 1093 1094 ctrlr.visible_ns = spdk_bit_array_create(3); 1095 spdk_bit_array_set(ctrlr.visible_ns, 0); 1096 spdk_bit_array_set(ctrlr.visible_ns, 2); 1097 1098 subsystem.ns = ns_arr; 1099 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 1100 1101 /* Invalid NSID 0 */ 1102 cmd.nsid = 0; 1103 memset(&nsdata, 0, sizeof(nsdata)); 1104 memset(&rsp, 0, sizeof(rsp)); 1105 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1106 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1107 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1108 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1109 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1110 1111 /* Valid NSID 1 */ 1112 cmd.nsid = 1; 1113 memset(&nsdata, 0, sizeof(nsdata)); 1114 memset(&rsp, 0, sizeof(rsp)); 1115 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1116 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1117 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1118 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1119 CU_ASSERT(nsdata.nsze == 1234); 1120 1121 /* Valid but inactive NSID 1 */ 1122 spdk_bit_array_clear(ctrlr.visible_ns, 0); 1123 cmd.nsid = 1; 1124 memset(&nsdata, 0, sizeof(nsdata)); 1125 memset(&rsp, 0, sizeof(rsp)); 1126 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1127 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1128 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1129 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1130 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1131 1132 /* Valid but unallocated NSID 2 */ 1133 cmd.nsid = 2; 1134 memset(&nsdata, 0, sizeof(nsdata)); 1135 memset(&rsp, 0, sizeof(rsp)); 1136 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1137 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1138 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1139 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1140 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1141 1142 /* Valid NSID 3 */ 1143 cmd.nsid = 3; 1144 memset(&nsdata, 0, sizeof(nsdata)); 1145 memset(&rsp, 0, sizeof(rsp)); 1146 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1147 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1148 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1149 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1150 CU_ASSERT(nsdata.nsze == 5678); 1151 1152 /* Invalid NSID 4 */ 1153 cmd.nsid = 4; 1154 memset(&nsdata, 0, sizeof(nsdata)); 1155 memset(&rsp, 0, sizeof(rsp)); 1156 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1157 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1158 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1159 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1160 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1161 1162 /* Invalid NSID 0xFFFFFFFF (NS management not supported) */ 1163 cmd.nsid = 0xFFFFFFFF; 1164 memset(&nsdata, 0, sizeof(nsdata)); 1165 memset(&rsp, 0, sizeof(rsp)); 1166 CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp, 1167 &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1168 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1169 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1170 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1171 1172 spdk_bit_array_free(&ctrlr.visible_ns); 1173 } 1174 1175 static void 1176 test_identify_ns_iocs_specific(void) 1177 { 1178 struct spdk_nvmf_subsystem subsystem = {}; 1179 struct spdk_nvmf_transport transport = {}; 1180 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport }; 1181 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 1182 struct spdk_nvme_cmd cmd = {}; 1183 struct spdk_nvme_cpl rsp = {}; 1184 struct spdk_nvme_zns_ns_data nsdata = {}; 1185 struct spdk_bdev bdev[2] = {{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE, .max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES}, {.blockcnt = 5678}}; 1186 struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}}; 1187 struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]}; 1188 1189 ctrlr.visible_ns = spdk_bit_array_create(3); 1190 spdk_bit_array_set(ctrlr.visible_ns, 0); 1191 spdk_bit_array_set(ctrlr.visible_ns, 1); 1192 spdk_bit_array_set(ctrlr.visible_ns, 2); 1193 subsystem.ns = ns_arr; 1194 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 1195 1196 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS; 1197 1198 /* Invalid ZNS NSID 0 */ 1199 cmd.nsid = 0; 1200 memset(&nsdata, 0xFF, sizeof(nsdata)); 1201 memset(&rsp, 0, sizeof(rsp)); 1202 CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1203 &nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1204 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1205 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1206 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1207 1208 /* Valid ZNS NSID 1 */ 1209 cmd.nsid = 1; 1210 memset(&nsdata, 0xFF, sizeof(nsdata)); 1211 memset(&rsp, 0, sizeof(rsp)); 1212 CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1213 &nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1214 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1215 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1216 CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1); 1217 CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1); 1218 CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1); 1219 CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE); 1220 nsdata.ozcs.read_across_zone_boundaries = 0; 1221 nsdata.mar = 0; 1222 nsdata.mor = 0; 1223 nsdata.lbafe[0].zsze = 0; 1224 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1225 1226 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM; 1227 1228 /* Valid NVM NSID 2 */ 1229 cmd.nsid = 2; 1230 memset(&nsdata, 0xFF, sizeof(nsdata)); 1231 memset(&rsp, 0, sizeof(rsp)); 1232 CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1233 &nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1234 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1235 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1236 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1237 1238 /* Invalid NVM NSID 3 */ 1239 cmd.nsid = 0; 1240 memset(&nsdata, 0xFF, sizeof(nsdata)); 1241 memset(&rsp, 0, sizeof(rsp)); 1242 CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1243 &nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1244 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1245 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 1246 CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata))); 1247 1248 spdk_bit_array_free(&ctrlr.visible_ns); 1249 } 1250 1251 static void 1252 test_set_get_features(void) 1253 { 1254 struct spdk_nvmf_subsystem subsystem = {}; 1255 struct spdk_nvmf_qpair admin_qpair = {}; 1256 enum spdk_nvme_ana_state ana_state[3]; 1257 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 1258 struct spdk_nvmf_ctrlr ctrlr = { 1259 .subsys = &subsystem, 1260 .admin_qpair = &admin_qpair, 1261 .listener = &listener 1262 }; 1263 union nvmf_h2c_msg cmd = {}; 1264 union nvmf_c2h_msg rsp = {}; 1265 struct spdk_nvmf_ns ns[3]; 1266 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]}; 1267 struct spdk_nvmf_request req; 1268 int rc; 1269 1270 ctrlr.visible_ns = spdk_bit_array_create(3); 1271 spdk_bit_array_set(ctrlr.visible_ns, 0); 1272 spdk_bit_array_set(ctrlr.visible_ns, 2); 1273 ns[0].anagrpid = 1; 1274 ns[2].anagrpid = 3; 1275 subsystem.ns = ns_arr; 1276 subsystem.max_nsid = SPDK_COUNTOF(ns_arr); 1277 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1278 listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1279 admin_qpair.ctrlr = &ctrlr; 1280 req.qpair = &admin_qpair; 1281 cmd.nvme_cmd.nsid = 1; 1282 req.cmd = &cmd; 1283 req.rsp = &rsp; 1284 1285 /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 1286 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1287 cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1; 1288 ns[0].ptpl_file = "testcfg"; 1289 rc = nvmf_ctrlr_set_features_reservation_persistence(&req); 1290 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1291 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 1292 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE); 1293 CU_ASSERT(ns[0].ptpl_activated == true); 1294 1295 /* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */ 1296 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1297 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST; 1298 rc = nvmf_ctrlr_get_features_reservation_persistence(&req); 1299 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1300 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1301 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1302 CU_ASSERT(rsp.nvme_cpl.cdw0 == 1); 1303 1304 1305 /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */ 1306 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1307 cmd.nvme_cmd.cdw11 = 0x42; 1308 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1309 1310 rc = nvmf_ctrlr_get_features(&req); 1311 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1312 1313 /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */ 1314 cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1315 cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */ 1316 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1317 1318 rc = nvmf_ctrlr_get_features(&req); 1319 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1320 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1321 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1322 1323 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */ 1324 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1325 cmd.nvme_cmd.cdw11 = 0x42; 1326 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1327 1328 rc = nvmf_ctrlr_set_features(&req); 1329 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1330 1331 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */ 1332 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1333 cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */ 1334 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1335 1336 rc = nvmf_ctrlr_set_features(&req); 1337 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1338 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1339 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1340 1341 /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */ 1342 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1343 cmd.nvme_cmd.cdw11 = 0x42; 1344 cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */ 1345 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; 1346 1347 rc = nvmf_ctrlr_set_features(&req); 1348 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1349 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1350 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1351 1352 1353 /* get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */ 1354 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1355 cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1356 1357 rc = nvmf_ctrlr_get_features(&req); 1358 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1359 1360 /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */ 1361 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1362 cmd.nvme_cmd.cdw11 = 0x42; 1363 cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1; 1364 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1365 1366 rc = nvmf_ctrlr_set_features(&req); 1367 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1368 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1369 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 1370 1371 /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */ 1372 cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES; 1373 cmd.nvme_cmd.cdw11 = 0x42; 1374 cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0; 1375 cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY; 1376 1377 rc = nvmf_ctrlr_set_features(&req); 1378 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1379 1380 spdk_bit_array_free(&ctrlr.visible_ns); 1381 } 1382 1383 /* 1384 * Reservation Unit Test Configuration 1385 * -------- -------- -------- 1386 * | Host A | | Host B | | Host C | 1387 * -------- -------- -------- 1388 * / \ | | 1389 * -------- -------- ------- ------- 1390 * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C| 1391 * -------- -------- ------- ------- 1392 * \ \ / / 1393 * \ \ / / 1394 * \ \ / / 1395 * -------------------------------------- 1396 * | NAMESPACE 1 | 1397 * -------------------------------------- 1398 */ 1399 1400 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C; 1401 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info; 1402 1403 static void 1404 ut_reservation_init(enum spdk_nvme_reservation_type rtype) 1405 { 1406 /* Host A has two controllers */ 1407 spdk_uuid_generate(&g_ctrlr1_A.hostid); 1408 spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid); 1409 1410 /* Host B has 1 controller */ 1411 spdk_uuid_generate(&g_ctrlr_B.hostid); 1412 1413 /* Host C has 1 controller */ 1414 spdk_uuid_generate(&g_ctrlr_C.hostid); 1415 1416 memset(&g_ns_info, 0, sizeof(g_ns_info)); 1417 g_ns_info.rtype = rtype; 1418 g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid; 1419 g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid; 1420 g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid; 1421 } 1422 1423 static void 1424 test_reservation_write_exclusive(void) 1425 { 1426 struct spdk_nvmf_request req = {}; 1427 union nvmf_h2c_msg cmd = {}; 1428 union nvmf_c2h_msg rsp = {}; 1429 int rc; 1430 1431 req.cmd = &cmd; 1432 req.rsp = &rsp; 1433 1434 /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */ 1435 ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE); 1436 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1437 1438 /* Test Case: Issue a Read command from Host A and Host B */ 1439 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1440 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1441 SPDK_CU_ASSERT_FATAL(rc == 0); 1442 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1443 SPDK_CU_ASSERT_FATAL(rc == 0); 1444 1445 /* Test Case: Issue a DSM Write command from Host A and Host B */ 1446 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1447 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1448 SPDK_CU_ASSERT_FATAL(rc == 0); 1449 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1450 SPDK_CU_ASSERT_FATAL(rc < 0); 1451 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1452 1453 /* Test Case: Issue a Write command from Host C */ 1454 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1455 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1456 SPDK_CU_ASSERT_FATAL(rc < 0); 1457 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1458 1459 /* Test Case: Issue a Read command from Host B */ 1460 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1461 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1462 SPDK_CU_ASSERT_FATAL(rc == 0); 1463 1464 /* Unregister Host C */ 1465 spdk_uuid_set_null(&g_ns_info.reg_hostid[2]); 1466 1467 /* Test Case: Read and Write commands from non-registrant Host C */ 1468 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1469 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1470 SPDK_CU_ASSERT_FATAL(rc < 0); 1471 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1472 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1473 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1474 SPDK_CU_ASSERT_FATAL(rc == 0); 1475 } 1476 1477 static void 1478 test_reservation_exclusive_access(void) 1479 { 1480 struct spdk_nvmf_request req = {}; 1481 union nvmf_h2c_msg cmd = {}; 1482 union nvmf_c2h_msg rsp = {}; 1483 int rc; 1484 1485 req.cmd = &cmd; 1486 req.rsp = &rsp; 1487 1488 /* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */ 1489 ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS); 1490 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1491 1492 /* Test Case: Issue a Read command from Host B */ 1493 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1494 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1495 SPDK_CU_ASSERT_FATAL(rc < 0); 1496 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1497 1498 /* Test Case: Issue a Reservation Release command from a valid Registrant */ 1499 cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE; 1500 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1501 SPDK_CU_ASSERT_FATAL(rc == 0); 1502 } 1503 1504 static void 1505 _test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1506 { 1507 struct spdk_nvmf_request req = {}; 1508 union nvmf_h2c_msg cmd = {}; 1509 union nvmf_c2h_msg rsp = {}; 1510 int rc; 1511 1512 req.cmd = &cmd; 1513 req.rsp = &rsp; 1514 1515 /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */ 1516 ut_reservation_init(rtype); 1517 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1518 1519 /* Test Case: Issue a Read command from Host A and Host C */ 1520 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1521 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1522 SPDK_CU_ASSERT_FATAL(rc == 0); 1523 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1524 SPDK_CU_ASSERT_FATAL(rc == 0); 1525 1526 /* Test Case: Issue a DSM Write command from Host A and Host C */ 1527 cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; 1528 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req); 1529 SPDK_CU_ASSERT_FATAL(rc == 0); 1530 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1531 SPDK_CU_ASSERT_FATAL(rc == 0); 1532 1533 /* Unregister Host C */ 1534 spdk_uuid_set_null(&g_ns_info.reg_hostid[2]); 1535 1536 /* Test Case: Read and Write commands from non-registrant Host C */ 1537 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1538 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1539 SPDK_CU_ASSERT_FATAL(rc == 0); 1540 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1541 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req); 1542 SPDK_CU_ASSERT_FATAL(rc < 0); 1543 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1544 } 1545 1546 static void 1547 test_reservation_write_exclusive_regs_only_and_all_regs(void) 1548 { 1549 _test_reservation_write_exclusive_regs_only_and_all_regs( 1550 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1551 _test_reservation_write_exclusive_regs_only_and_all_regs( 1552 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS); 1553 } 1554 1555 static void 1556 _test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype) 1557 { 1558 struct spdk_nvmf_request req = {}; 1559 union nvmf_h2c_msg cmd = {}; 1560 union nvmf_c2h_msg rsp = {}; 1561 int rc; 1562 1563 req.cmd = &cmd; 1564 req.rsp = &rsp; 1565 1566 /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */ 1567 ut_reservation_init(rtype); 1568 g_ns_info.holder_id = g_ctrlr1_A.hostid; 1569 1570 /* Test Case: Issue a Write command from Host B */ 1571 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1572 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1573 SPDK_CU_ASSERT_FATAL(rc == 0); 1574 1575 /* Unregister Host B */ 1576 spdk_uuid_set_null(&g_ns_info.reg_hostid[1]); 1577 1578 /* Test Case: Issue a Read command from Host B */ 1579 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 1580 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1581 SPDK_CU_ASSERT_FATAL(rc < 0); 1582 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1583 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 1584 rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req); 1585 SPDK_CU_ASSERT_FATAL(rc < 0); 1586 SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 1587 } 1588 1589 static void 1590 test_reservation_exclusive_access_regs_only_and_all_regs(void) 1591 { 1592 _test_reservation_exclusive_access_regs_only_and_all_regs( 1593 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY); 1594 _test_reservation_exclusive_access_regs_only_and_all_regs( 1595 SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS); 1596 } 1597 1598 static void 1599 init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1600 { 1601 STAILQ_INIT(&ctrlr->async_events); 1602 } 1603 1604 static void 1605 cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1606 { 1607 struct spdk_nvmf_async_event_completion *event, *event_tmp; 1608 1609 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) { 1610 STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link); 1611 free(event); 1612 } 1613 } 1614 1615 static int 1616 num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr) 1617 { 1618 int num = 0; 1619 struct spdk_nvmf_async_event_completion *event; 1620 1621 STAILQ_FOREACH(event, &ctrlr->async_events, link) { 1622 num++; 1623 } 1624 return num; 1625 } 1626 1627 static void 1628 test_reservation_notification_log_page(void) 1629 { 1630 struct spdk_nvmf_ctrlr ctrlr; 1631 struct spdk_nvmf_qpair qpair; 1632 struct spdk_nvmf_ns ns; 1633 struct spdk_nvmf_request req = {}; 1634 union nvmf_h2c_msg cmd = {}; 1635 union nvmf_c2h_msg rsp = {}; 1636 union spdk_nvme_async_event_completion event = {}; 1637 struct spdk_nvme_reservation_notification_log logs[3]; 1638 struct iovec iov; 1639 1640 memset(&ctrlr, 0, sizeof(ctrlr)); 1641 ctrlr.thread = spdk_get_thread(); 1642 TAILQ_INIT(&ctrlr.log_head); 1643 init_pending_async_events(&ctrlr); 1644 ns.nsid = 1; 1645 1646 /* Test Case: Mask all the reservation notifications */ 1647 ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK | 1648 SPDK_NVME_RESERVATION_RELEASED_MASK | 1649 SPDK_NVME_RESERVATION_PREEMPTED_MASK; 1650 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1651 SPDK_NVME_REGISTRATION_PREEMPTED); 1652 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1653 SPDK_NVME_RESERVATION_RELEASED); 1654 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1655 SPDK_NVME_RESERVATION_PREEMPTED); 1656 poll_threads(); 1657 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head)); 1658 1659 /* Test Case: Unmask all the reservation notifications, 1660 * 3 log pages are generated, and AER was triggered. 1661 */ 1662 ns.mask = 0; 1663 ctrlr.num_avail_log_pages = 0; 1664 req.cmd = &cmd; 1665 req.rsp = &rsp; 1666 ctrlr.aer_req[0] = &req; 1667 ctrlr.nr_aer_reqs = 1; 1668 req.qpair = &qpair; 1669 TAILQ_INIT(&qpair.outstanding); 1670 qpair.ctrlr = NULL; 1671 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 1672 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 1673 1674 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1675 SPDK_NVME_REGISTRATION_PREEMPTED); 1676 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1677 SPDK_NVME_RESERVATION_RELEASED); 1678 nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns, 1679 SPDK_NVME_RESERVATION_PREEMPTED); 1680 poll_threads(); 1681 event.raw = rsp.nvme_cpl.cdw0; 1682 SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO); 1683 SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL); 1684 SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION); 1685 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3); 1686 1687 /* Test Case: Get Log Page to clear the log pages */ 1688 iov.iov_base = &logs[0]; 1689 iov.iov_len = sizeof(logs); 1690 nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0); 1691 SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0); 1692 1693 cleanup_pending_async_events(&ctrlr); 1694 } 1695 1696 static void 1697 test_get_dif_ctx(void) 1698 { 1699 struct spdk_nvmf_subsystem subsystem = {}; 1700 struct spdk_nvmf_request req = {}; 1701 struct spdk_nvmf_qpair qpair = {}; 1702 struct spdk_nvmf_ctrlr ctrlr = {}; 1703 struct spdk_nvmf_ns ns = {}; 1704 struct spdk_nvmf_ns *_ns = NULL; 1705 struct spdk_bdev bdev = {}; 1706 union nvmf_h2c_msg cmd = {}; 1707 struct spdk_dif_ctx dif_ctx = {}; 1708 bool ret; 1709 1710 ctrlr.subsys = &subsystem; 1711 ctrlr.visible_ns = spdk_bit_array_create(1); 1712 spdk_bit_array_set(ctrlr.visible_ns, 0); 1713 1714 qpair.ctrlr = &ctrlr; 1715 1716 req.qpair = &qpair; 1717 req.cmd = &cmd; 1718 1719 ns.bdev = &bdev; 1720 1721 ctrlr.dif_insert_or_strip = false; 1722 1723 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1724 CU_ASSERT(ret == false); 1725 1726 ctrlr.dif_insert_or_strip = true; 1727 qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED; 1728 1729 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1730 CU_ASSERT(ret == false); 1731 1732 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 1733 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 1734 1735 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1736 CU_ASSERT(ret == false); 1737 1738 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH; 1739 1740 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1741 CU_ASSERT(ret == false); 1742 1743 qpair.qid = 1; 1744 1745 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1746 CU_ASSERT(ret == false); 1747 1748 cmd.nvme_cmd.nsid = 1; 1749 1750 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1751 CU_ASSERT(ret == false); 1752 1753 subsystem.max_nsid = 1; 1754 subsystem.ns = &_ns; 1755 subsystem.ns[0] = &ns; 1756 1757 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1758 CU_ASSERT(ret == false); 1759 1760 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE; 1761 1762 ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx); 1763 CU_ASSERT(ret == true); 1764 1765 spdk_bit_array_free(&ctrlr.visible_ns); 1766 } 1767 1768 static void 1769 test_identify_ctrlr(void) 1770 { 1771 struct spdk_nvmf_tgt tgt = {}; 1772 struct spdk_nvmf_subsystem subsystem = { 1773 .subtype = SPDK_NVMF_SUBTYPE_NVME, 1774 .tgt = &tgt, 1775 }; 1776 struct spdk_nvmf_transport_ops tops = {}; 1777 struct spdk_nvmf_transport transport = { 1778 .ops = &tops, 1779 .opts = { 1780 .in_capsule_data_size = 4096, 1781 }, 1782 }; 1783 struct spdk_nvmf_qpair admin_qpair = { .transport = &transport}; 1784 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair }; 1785 struct spdk_nvme_ctrlr_data cdata = {}; 1786 uint32_t expected_ioccsz; 1787 1788 nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata); 1789 1790 /* Check ioccsz, TCP transport */ 1791 tops.type = SPDK_NVME_TRANSPORT_TCP; 1792 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1793 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1794 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1795 1796 /* Check ioccsz, RDMA transport */ 1797 tops.type = SPDK_NVME_TRANSPORT_RDMA; 1798 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1799 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1800 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1801 1802 /* Check ioccsz, TCP transport with dif_insert_or_strip */ 1803 tops.type = SPDK_NVME_TRANSPORT_TCP; 1804 ctrlr.dif_insert_or_strip = true; 1805 expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16; 1806 CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1807 CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz); 1808 } 1809 1810 static void 1811 test_identify_ctrlr_iocs_specific(void) 1812 { 1813 struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 }; 1814 struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 }; 1815 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop }; 1816 struct spdk_nvme_cmd cmd = {}; 1817 struct spdk_nvme_cpl rsp = {}; 1818 struct spdk_nvme_zns_ctrlr_data ctrlr_data = {}; 1819 struct spdk_nvme_nvm_ctrlr_data cdata_nvm = {}; 1820 1821 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS; 1822 1823 /* ZNS max_zone_append_size_kib no limit */ 1824 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1825 memset(&rsp, 0, sizeof(rsp)); 1826 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1827 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1828 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1829 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1830 CU_ASSERT(ctrlr_data.zasl == 0); 1831 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1832 1833 /* ZNS max_zone_append_size_kib = 4096 */ 1834 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1835 memset(&rsp, 0, sizeof(rsp)); 1836 subsystem.max_zone_append_size_kib = 4096; 1837 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1838 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1839 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1840 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1841 CU_ASSERT(ctrlr_data.zasl == 0); 1842 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1843 1844 /* ZNS max_zone_append_size_kib = 60000 */ 1845 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1846 memset(&rsp, 0, sizeof(rsp)); 1847 subsystem.max_zone_append_size_kib = 60000; 1848 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1849 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1850 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1851 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1852 CU_ASSERT(ctrlr_data.zasl == 3); 1853 ctrlr_data.zasl = 0; 1854 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1855 1856 /* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */ 1857 memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data)); 1858 memset(&rsp, 0, sizeof(rsp)); 1859 ctrlr.vcprop.cap.bits.mpsmin = 2; 1860 subsystem.max_zone_append_size_kib = 60000; 1861 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1862 &ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1863 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1864 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1865 CU_ASSERT(ctrlr_data.zasl == 1); 1866 ctrlr_data.zasl = 0; 1867 CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data))); 1868 ctrlr.vcprop.cap.bits.mpsmin = 0; 1869 1870 cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM; 1871 1872 /* NVM max_discard_size_kib = 1024; 1873 * max_write_zeroes_size_kib = 1024; 1874 * mpsmin = 0; 1875 */ 1876 memset(&cdata_nvm, 0xFF, sizeof(cdata_nvm)); 1877 memset(&rsp, 0, sizeof(rsp)); 1878 subsystem.max_discard_size_kib = (uint64_t)1024; 1879 subsystem.max_write_zeroes_size_kib = (uint64_t)1024; 1880 CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp, 1881 &cdata_nvm, sizeof(cdata_nvm)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1882 CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC); 1883 CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS); 1884 CU_ASSERT(cdata_nvm.wzsl == 8); 1885 CU_ASSERT(cdata_nvm.dmrsl == 2048); 1886 CU_ASSERT(cdata_nvm.dmrl == 1); 1887 } 1888 1889 static int 1890 custom_admin_cmd_hdlr(struct spdk_nvmf_request *req) 1891 { 1892 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 1893 1894 return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE; 1895 }; 1896 1897 static void 1898 test_custom_admin_cmd(void) 1899 { 1900 struct spdk_nvmf_subsystem subsystem; 1901 struct spdk_nvmf_qpair qpair; 1902 struct spdk_nvmf_ctrlr ctrlr; 1903 struct spdk_nvmf_request req; 1904 struct spdk_nvmf_ns *ns_ptrs[1]; 1905 struct spdk_nvmf_ns ns; 1906 union nvmf_h2c_msg cmd; 1907 union nvmf_c2h_msg rsp; 1908 struct spdk_bdev bdev; 1909 uint8_t buf[4096]; 1910 int rc; 1911 1912 memset(&subsystem, 0, sizeof(subsystem)); 1913 ns_ptrs[0] = &ns; 1914 subsystem.ns = ns_ptrs; 1915 subsystem.max_nsid = 1; 1916 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 1917 1918 memset(&ns, 0, sizeof(ns)); 1919 ns.opts.nsid = 1; 1920 ns.bdev = &bdev; 1921 1922 memset(&qpair, 0, sizeof(qpair)); 1923 qpair.ctrlr = &ctrlr; 1924 1925 memset(&ctrlr, 0, sizeof(ctrlr)); 1926 ctrlr.subsys = &subsystem; 1927 ctrlr.vcprop.cc.bits.en = 1; 1928 ctrlr.thread = spdk_get_thread(); 1929 1930 memset(&req, 0, sizeof(req)); 1931 req.qpair = &qpair; 1932 req.cmd = &cmd; 1933 req.rsp = &rsp; 1934 req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST; 1935 req.length = sizeof(buf); 1936 SPDK_IOV_ONE(req.iov, &req.iovcnt, &buf, req.length); 1937 1938 memset(&cmd, 0, sizeof(cmd)); 1939 cmd.nvme_cmd.opc = 0xc1; 1940 cmd.nvme_cmd.nsid = 0; 1941 memset(&rsp, 0, sizeof(rsp)); 1942 1943 spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr); 1944 1945 /* Ensure that our hdlr is being called */ 1946 rc = nvmf_ctrlr_process_admin_cmd(&req); 1947 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 1948 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1949 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1950 } 1951 1952 static void 1953 test_fused_compare_and_write(void) 1954 { 1955 struct spdk_nvmf_request req = {}; 1956 struct spdk_nvmf_qpair qpair = {}; 1957 struct spdk_nvme_cmd cmd = {}; 1958 union nvmf_c2h_msg rsp = {}; 1959 struct spdk_nvmf_ctrlr ctrlr = {}; 1960 struct spdk_nvmf_subsystem subsystem = {}; 1961 struct spdk_nvmf_ns ns = {}; 1962 struct spdk_nvmf_ns *subsys_ns[1] = {}; 1963 enum spdk_nvme_ana_state ana_state[1]; 1964 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 1965 struct spdk_bdev bdev = {}; 1966 1967 struct spdk_nvmf_poll_group group = {}; 1968 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 1969 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 1970 struct spdk_io_channel io_ch = {}; 1971 1972 ns.bdev = &bdev; 1973 ns.anagrpid = 1; 1974 1975 subsystem.id = 0; 1976 subsystem.max_nsid = 1; 1977 subsys_ns[0] = &ns; 1978 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 1979 1980 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 1981 1982 /* Enable controller */ 1983 ctrlr.vcprop.cc.bits.en = 1; 1984 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 1985 ctrlr.listener = &listener; 1986 ctrlr.visible_ns = spdk_bit_array_create(1); 1987 spdk_bit_array_set(ctrlr.visible_ns, 0); 1988 1989 group.num_sgroups = 1; 1990 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1991 sgroups.num_ns = 1; 1992 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1993 ns_info.channel = &io_ch; 1994 sgroups.ns_info = &ns_info; 1995 TAILQ_INIT(&sgroups.queued); 1996 group.sgroups = &sgroups; 1997 TAILQ_INIT(&qpair.outstanding); 1998 1999 qpair.ctrlr = &ctrlr; 2000 qpair.group = &group; 2001 qpair.qid = 1; 2002 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2003 2004 cmd.nsid = 1; 2005 2006 req.qpair = &qpair; 2007 req.cmd = (union nvmf_h2c_msg *)&cmd; 2008 req.rsp = &rsp; 2009 2010 /* SUCCESS/SUCCESS */ 2011 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2012 cmd.opc = SPDK_NVME_OPC_COMPARE; 2013 2014 spdk_nvmf_request_exec(&req); 2015 CU_ASSERT(qpair.first_fused_req != NULL); 2016 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2017 2018 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2019 cmd.opc = SPDK_NVME_OPC_WRITE; 2020 2021 spdk_nvmf_request_exec(&req); 2022 CU_ASSERT(qpair.first_fused_req == NULL); 2023 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2024 2025 /* Wrong sequence */ 2026 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2027 cmd.opc = SPDK_NVME_OPC_WRITE; 2028 2029 spdk_nvmf_request_exec(&req); 2030 CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status)); 2031 CU_ASSERT(qpair.first_fused_req == NULL); 2032 2033 /* Write as FUSE_FIRST (Wrong op code) */ 2034 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2035 cmd.opc = SPDK_NVME_OPC_WRITE; 2036 2037 spdk_nvmf_request_exec(&req); 2038 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 2039 CU_ASSERT(qpair.first_fused_req == NULL); 2040 2041 /* Compare as FUSE_SECOND (Wrong op code) */ 2042 cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST; 2043 cmd.opc = SPDK_NVME_OPC_COMPARE; 2044 2045 spdk_nvmf_request_exec(&req); 2046 CU_ASSERT(qpair.first_fused_req != NULL); 2047 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2048 2049 cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2050 cmd.opc = SPDK_NVME_OPC_COMPARE; 2051 2052 spdk_nvmf_request_exec(&req); 2053 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE); 2054 CU_ASSERT(qpair.first_fused_req == NULL); 2055 2056 spdk_bit_array_free(&ctrlr.visible_ns); 2057 } 2058 2059 static void 2060 test_multi_async_event_reqs(void) 2061 { 2062 struct spdk_nvmf_subsystem subsystem = {}; 2063 struct spdk_nvmf_qpair qpair = {}; 2064 struct spdk_nvmf_ctrlr ctrlr = {}; 2065 struct spdk_nvmf_request req[5] = {}; 2066 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2067 struct spdk_nvmf_ns ns = {}; 2068 union nvmf_h2c_msg cmd[5] = {}; 2069 union nvmf_c2h_msg rsp[5] = {}; 2070 2071 struct spdk_nvmf_poll_group group = {}; 2072 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2073 2074 int i; 2075 2076 ns_ptrs[0] = &ns; 2077 subsystem.ns = ns_ptrs; 2078 subsystem.max_nsid = 1; 2079 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2080 2081 ns.opts.nsid = 1; 2082 group.sgroups = &sgroups; 2083 2084 qpair.ctrlr = &ctrlr; 2085 qpair.group = &group; 2086 TAILQ_INIT(&qpair.outstanding); 2087 2088 ctrlr.subsys = &subsystem; 2089 ctrlr.vcprop.cc.bits.en = 1; 2090 ctrlr.thread = spdk_get_thread(); 2091 2092 for (i = 0; i < 5; i++) { 2093 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2094 cmd[i].nvme_cmd.nsid = 1; 2095 cmd[i].nvme_cmd.cid = i; 2096 2097 req[i].qpair = &qpair; 2098 req[i].cmd = &cmd[i]; 2099 req[i].rsp = &rsp[i]; 2100 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 2101 } 2102 2103 /* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */ 2104 sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS; 2105 for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) { 2106 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 2107 CU_ASSERT(ctrlr.nr_aer_reqs == i + 1); 2108 } 2109 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2110 2111 /* Exceeding the SPDK_NVMF_MAX_ASYNC_EVENTS reports error */ 2112 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2113 CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS); 2114 CU_ASSERT(rsp[4].nvme_cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC); 2115 CU_ASSERT(rsp[4].nvme_cpl.status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED); 2116 2117 /* Test if the aer_reqs keep continuous when abort a req in the middle */ 2118 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true); 2119 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 2120 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 2121 CU_ASSERT(ctrlr.aer_req[2] == &req[3]); 2122 2123 CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true); 2124 CU_ASSERT(ctrlr.aer_req[0] == &req[0]); 2125 CU_ASSERT(ctrlr.aer_req[1] == &req[1]); 2126 CU_ASSERT(ctrlr.aer_req[2] == NULL); 2127 CU_ASSERT(ctrlr.nr_aer_reqs == 2); 2128 2129 TAILQ_REMOVE(&qpair.outstanding, &req[0], link); 2130 TAILQ_REMOVE(&qpair.outstanding, &req[1], link); 2131 } 2132 2133 static void 2134 test_get_ana_log_page_one_ns_per_anagrp(void) 2135 { 2136 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t)) 2137 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE) 2138 uint32_t ana_group[3]; 2139 struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group }; 2140 struct spdk_nvmf_ctrlr ctrlr = {}; 2141 enum spdk_nvme_ana_state ana_state[3]; 2142 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2143 struct spdk_nvmf_ns ns[3]; 2144 struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]}; 2145 uint64_t offset; 2146 uint32_t length; 2147 int i; 2148 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2149 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2150 struct iovec iov, iovs[2]; 2151 struct spdk_nvme_ana_page *ana_hdr; 2152 char _ana_desc[UT_ANA_DESC_SIZE]; 2153 struct spdk_nvme_ana_group_descriptor *ana_desc; 2154 2155 subsystem.ns = ns_arr; 2156 subsystem.max_nsid = 3; 2157 for (i = 0; i < 3; i++) { 2158 subsystem.ana_group[i] = 1; 2159 } 2160 ctrlr.subsys = &subsystem; 2161 ctrlr.listener = &listener; 2162 2163 for (i = 0; i < 3; i++) { 2164 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2165 } 2166 2167 for (i = 0; i < 3; i++) { 2168 ns_arr[i]->nsid = i + 1; 2169 ns_arr[i]->anagrpid = i + 1; 2170 } 2171 2172 /* create expected page */ 2173 ana_hdr = (void *)&expected_page[0]; 2174 ana_hdr->num_ana_group_desc = 3; 2175 ana_hdr->change_count = 0; 2176 2177 /* descriptor may be unaligned. So create data and then copy it to the location. */ 2178 ana_desc = (void *)_ana_desc; 2179 offset = sizeof(struct spdk_nvme_ana_page); 2180 2181 for (i = 0; i < 3; i++) { 2182 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 2183 ana_desc->ana_group_id = ns_arr[i]->nsid; 2184 ana_desc->num_of_nsid = 1; 2185 ana_desc->change_count = 0; 2186 ana_desc->ana_state = ctrlr.listener->ana_state[i]; 2187 ana_desc->nsid[0] = ns_arr[i]->nsid; 2188 memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE); 2189 offset += UT_ANA_DESC_SIZE; 2190 } 2191 2192 /* read entire actual log page */ 2193 offset = 0; 2194 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2195 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2196 iov.iov_base = &actual_page[offset]; 2197 iov.iov_len = length; 2198 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2199 offset += length; 2200 } 2201 2202 /* compare expected page and actual page */ 2203 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2204 2205 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2206 offset = 0; 2207 iovs[0].iov_base = &actual_page[offset]; 2208 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 2209 offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4; 2210 iovs[1].iov_base = &actual_page[offset]; 2211 iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset; 2212 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2213 2214 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2215 2216 #undef UT_ANA_DESC_SIZE 2217 #undef UT_ANA_LOG_PAGE_SIZE 2218 } 2219 2220 static void 2221 test_get_ana_log_page_multi_ns_per_anagrp(void) 2222 { 2223 #define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + \ 2224 sizeof(struct spdk_nvme_ana_group_descriptor) * 2 + \ 2225 sizeof(uint32_t) * 5) 2226 struct spdk_nvmf_ns ns[5]; 2227 struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]}; 2228 uint32_t ana_group[5] = {0}; 2229 struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, }; 2230 enum spdk_nvme_ana_state ana_state[5]; 2231 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, }; 2232 struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, }; 2233 char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2234 char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0}; 2235 struct iovec iov, iovs[2]; 2236 struct spdk_nvme_ana_page *ana_hdr; 2237 char _ana_desc[UT_ANA_LOG_PAGE_SIZE]; 2238 struct spdk_nvme_ana_group_descriptor *ana_desc; 2239 uint64_t offset; 2240 uint32_t length; 2241 int i; 2242 2243 subsystem.max_nsid = 5; 2244 subsystem.ana_group[1] = 3; 2245 subsystem.ana_group[2] = 2; 2246 for (i = 0; i < 5; i++) { 2247 listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2248 } 2249 2250 for (i = 0; i < 5; i++) { 2251 ns_arr[i]->nsid = i + 1; 2252 } 2253 ns_arr[0]->anagrpid = 2; 2254 ns_arr[1]->anagrpid = 3; 2255 ns_arr[2]->anagrpid = 2; 2256 ns_arr[3]->anagrpid = 3; 2257 ns_arr[4]->anagrpid = 2; 2258 2259 /* create expected page */ 2260 ana_hdr = (void *)&expected_page[0]; 2261 ana_hdr->num_ana_group_desc = 2; 2262 ana_hdr->change_count = 0; 2263 2264 /* descriptor may be unaligned. So create data and then copy it to the location. */ 2265 ana_desc = (void *)_ana_desc; 2266 offset = sizeof(struct spdk_nvme_ana_page); 2267 2268 memset(_ana_desc, 0, sizeof(_ana_desc)); 2269 ana_desc->ana_group_id = 2; 2270 ana_desc->num_of_nsid = 3; 2271 ana_desc->change_count = 0; 2272 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2273 ana_desc->nsid[0] = 1; 2274 ana_desc->nsid[1] = 3; 2275 ana_desc->nsid[2] = 5; 2276 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 2277 sizeof(uint32_t) * 3); 2278 offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3; 2279 2280 memset(_ana_desc, 0, sizeof(_ana_desc)); 2281 ana_desc->ana_group_id = 3; 2282 ana_desc->num_of_nsid = 2; 2283 ana_desc->change_count = 0; 2284 ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2285 ana_desc->nsid[0] = 2; 2286 ana_desc->nsid[1] = 4; 2287 memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) + 2288 sizeof(uint32_t) * 2); 2289 2290 /* read entire actual log page, and compare expected page and actual page. */ 2291 offset = 0; 2292 while (offset < UT_ANA_LOG_PAGE_SIZE) { 2293 length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset); 2294 iov.iov_base = &actual_page[offset]; 2295 iov.iov_len = length; 2296 nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0); 2297 offset += length; 2298 } 2299 2300 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2301 2302 memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE); 2303 offset = 0; 2304 iovs[0].iov_base = &actual_page[offset]; 2305 iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2306 offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5; 2307 iovs[1].iov_base = &actual_page[offset]; 2308 iovs[1].iov_len = sizeof(uint32_t) * 5; 2309 nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0); 2310 2311 CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0); 2312 2313 #undef UT_ANA_LOG_PAGE_SIZE 2314 } 2315 static void 2316 test_multi_async_events(void) 2317 { 2318 struct spdk_nvmf_subsystem subsystem = {}; 2319 struct spdk_nvmf_qpair qpair = {}; 2320 struct spdk_nvmf_ctrlr ctrlr = {}; 2321 struct spdk_nvmf_request req[4] = {}; 2322 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2323 struct spdk_nvmf_ns ns = {}; 2324 union nvmf_h2c_msg cmd[4] = {}; 2325 union nvmf_c2h_msg rsp[4] = {}; 2326 union spdk_nvme_async_event_completion event = {}; 2327 struct spdk_nvmf_poll_group group = {}; 2328 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2329 int i; 2330 2331 ns_ptrs[0] = &ns; 2332 subsystem.ns = ns_ptrs; 2333 subsystem.max_nsid = 1; 2334 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2335 2336 ns.opts.nsid = 1; 2337 group.sgroups = &sgroups; 2338 2339 qpair.ctrlr = &ctrlr; 2340 qpair.group = &group; 2341 TAILQ_INIT(&qpair.outstanding); 2342 2343 ctrlr.subsys = &subsystem; 2344 ctrlr.vcprop.cc.bits.en = 1; 2345 ctrlr.thread = spdk_get_thread(); 2346 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2347 ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1; 2348 ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1; 2349 init_pending_async_events(&ctrlr); 2350 2351 /* Target queue pending events when there is no outstanding AER request */ 2352 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2353 nvmf_ctrlr_async_event_ana_change_notice(&ctrlr); 2354 nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr); 2355 2356 for (i = 0; i < 4; i++) { 2357 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2358 cmd[i].nvme_cmd.nsid = 1; 2359 cmd[i].nvme_cmd.cid = i; 2360 2361 req[i].qpair = &qpair; 2362 req[i].cmd = &cmd[i]; 2363 req[i].rsp = &rsp[i]; 2364 2365 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link); 2366 2367 sgroups.mgmt_io_outstanding = 1; 2368 if (i < 3) { 2369 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2370 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2371 CU_ASSERT(ctrlr.nr_aer_reqs == 0); 2372 } else { 2373 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS); 2374 CU_ASSERT(sgroups.mgmt_io_outstanding == 0); 2375 CU_ASSERT(ctrlr.nr_aer_reqs == 1); 2376 } 2377 } 2378 2379 event.raw = rsp[0].nvme_cpl.cdw0; 2380 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2381 event.raw = rsp[1].nvme_cpl.cdw0; 2382 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE); 2383 event.raw = rsp[2].nvme_cpl.cdw0; 2384 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE); 2385 2386 cleanup_pending_async_events(&ctrlr); 2387 } 2388 2389 static void 2390 test_rae(void) 2391 { 2392 struct spdk_nvmf_subsystem subsystem = {}; 2393 struct spdk_nvmf_qpair qpair = {}; 2394 struct spdk_nvmf_ctrlr ctrlr = {}; 2395 struct spdk_nvmf_request req[3] = {}; 2396 struct spdk_nvmf_ns *ns_ptrs[1] = {}; 2397 struct spdk_nvmf_ns ns = {}; 2398 union nvmf_h2c_msg cmd[3] = {}; 2399 union nvmf_c2h_msg rsp[3] = {}; 2400 union spdk_nvme_async_event_completion event = {}; 2401 struct spdk_nvmf_poll_group group = {}; 2402 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2403 int i; 2404 char data[4096]; 2405 2406 ns_ptrs[0] = &ns; 2407 subsystem.ns = ns_ptrs; 2408 subsystem.max_nsid = 1; 2409 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2410 2411 ns.opts.nsid = 1; 2412 group.sgroups = &sgroups; 2413 2414 qpair.ctrlr = &ctrlr; 2415 qpair.group = &group; 2416 TAILQ_INIT(&qpair.outstanding); 2417 2418 ctrlr.subsys = &subsystem; 2419 ctrlr.vcprop.cc.bits.en = 1; 2420 ctrlr.thread = spdk_get_thread(); 2421 ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1; 2422 init_pending_async_events(&ctrlr); 2423 2424 /* Target queue pending events when there is no outstanding AER request */ 2425 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2426 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2427 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2428 /* only one event will be queued before RAE is clear */ 2429 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2430 2431 req[0].qpair = &qpair; 2432 req[0].cmd = &cmd[0]; 2433 req[0].rsp = &rsp[0]; 2434 cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 2435 cmd[0].nvme_cmd.nsid = 1; 2436 cmd[0].nvme_cmd.cid = 0; 2437 2438 for (i = 1; i < 3; i++) { 2439 req[i].qpair = &qpair; 2440 req[i].cmd = &cmd[i]; 2441 req[i].rsp = &rsp[i]; 2442 req[i].length = sizeof(data); 2443 SPDK_IOV_ONE(req[i].iov, &req[i].iovcnt, &data, req[i].length); 2444 2445 cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE; 2446 cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid = 2447 SPDK_NVME_LOG_CHANGED_NS_LIST; 2448 cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl = 2449 spdk_nvme_bytes_to_numd(req[i].length); 2450 cmd[i].nvme_cmd.cid = i; 2451 } 2452 cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1; 2453 cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0; 2454 2455 /* consume the pending event */ 2456 TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link); 2457 CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2458 event.raw = rsp[0].nvme_cpl.cdw0; 2459 CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED); 2460 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2461 2462 /* get log with RAE set */ 2463 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2464 CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2465 CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2466 2467 /* will not generate new event until RAE is clear */ 2468 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2469 CU_ASSERT(num_pending_async_events(&ctrlr) == 0); 2470 2471 /* get log with RAE clear */ 2472 CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 2473 CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 2474 CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 2475 2476 nvmf_ctrlr_async_event_ns_notice(&ctrlr); 2477 CU_ASSERT(num_pending_async_events(&ctrlr) == 1); 2478 2479 cleanup_pending_async_events(&ctrlr); 2480 } 2481 2482 static void 2483 test_nvmf_ctrlr_create_destruct(void) 2484 { 2485 struct spdk_nvmf_fabric_connect_data connect_data = {}; 2486 struct spdk_nvmf_poll_group group = {}; 2487 struct spdk_nvmf_subsystem_poll_group sgroups[2] = {}; 2488 struct spdk_nvmf_transport transport = {}; 2489 struct spdk_nvmf_transport_ops tops = {}; 2490 struct spdk_nvmf_subsystem subsystem = {}; 2491 struct spdk_nvmf_ns *ns_arr[1] = { NULL }; 2492 struct spdk_nvmf_request req = {}; 2493 struct spdk_nvmf_qpair qpair = {}; 2494 struct spdk_nvmf_ctrlr *ctrlr = NULL; 2495 struct spdk_nvmf_tgt tgt = {}; 2496 union nvmf_h2c_msg cmd = {}; 2497 union nvmf_c2h_msg rsp = {}; 2498 const uint8_t hostid[16] = { 2499 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2500 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F 2501 }; 2502 const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1"; 2503 const char hostnqn[] = "nqn.2016-06.io.spdk:host1"; 2504 2505 group.thread = spdk_get_thread(); 2506 transport.ops = &tops; 2507 transport.opts.max_aq_depth = 32; 2508 transport.opts.max_queue_depth = 64; 2509 transport.opts.max_qpairs_per_ctrlr = 3; 2510 transport.opts.dif_insert_or_strip = true; 2511 transport.tgt = &tgt; 2512 qpair.transport = &transport; 2513 qpair.group = &group; 2514 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 2515 TAILQ_INIT(&qpair.outstanding); 2516 2517 memcpy(connect_data.hostid, hostid, sizeof(hostid)); 2518 connect_data.cntlid = 0xFFFF; 2519 snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn); 2520 snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn); 2521 2522 subsystem.thread = spdk_get_thread(); 2523 subsystem.id = 1; 2524 TAILQ_INIT(&subsystem.ctrlrs); 2525 subsystem.tgt = &tgt; 2526 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2527 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2528 snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn); 2529 subsystem.ns = ns_arr; 2530 2531 group.sgroups = sgroups; 2532 2533 cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 2534 cmd.connect_cmd.cid = 1; 2535 cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 2536 cmd.connect_cmd.recfmt = 0; 2537 cmd.connect_cmd.qid = 0; 2538 cmd.connect_cmd.sqsize = 31; 2539 cmd.connect_cmd.cattr = 0; 2540 cmd.connect_cmd.kato = 120000; 2541 2542 req.qpair = &qpair; 2543 req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER; 2544 req.length = sizeof(connect_data); 2545 SPDK_IOV_ONE(req.iov, &req.iovcnt, &connect_data, req.length); 2546 req.cmd = &cmd; 2547 req.rsp = &rsp; 2548 2549 TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link); 2550 sgroups[subsystem.id].mgmt_io_outstanding++; 2551 2552 ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base); 2553 poll_threads(); 2554 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2555 CU_ASSERT(req.qpair->ctrlr == ctrlr); 2556 CU_ASSERT(ctrlr->subsys == &subsystem); 2557 CU_ASSERT(ctrlr->thread == req.qpair->group->thread); 2558 CU_ASSERT(ctrlr->disconnect_in_progress == false); 2559 CU_ASSERT(ctrlr->qpair_mask != NULL); 2560 CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000); 2561 CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1); 2562 CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1); 2563 CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1); 2564 CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1); 2565 CU_ASSERT(!strncmp((void *)&ctrlr->hostid, hostid, 16)); 2566 CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1); 2567 CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63); 2568 CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0); 2569 CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500); 2570 CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0); 2571 CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM); 2572 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0); 2573 CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0); 2574 CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1); 2575 CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3); 2576 CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0); 2577 CU_ASSERT(ctrlr->vcprop.cc.raw == 0); 2578 CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0); 2579 CU_ASSERT(ctrlr->vcprop.csts.raw == 0); 2580 CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0); 2581 CU_ASSERT(ctrlr->dif_insert_or_strip == true); 2582 2583 ctrlr->in_destruct = true; 2584 nvmf_ctrlr_destruct(ctrlr); 2585 poll_threads(); 2586 CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs)); 2587 CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding)); 2588 } 2589 2590 static void 2591 test_nvmf_ctrlr_use_zcopy(void) 2592 { 2593 struct spdk_nvmf_subsystem subsystem = {}; 2594 struct spdk_nvmf_transport transport = {}; 2595 struct spdk_nvmf_request req = {}; 2596 struct spdk_nvmf_qpair qpair = {}; 2597 struct spdk_nvmf_ctrlr ctrlr = {}; 2598 union nvmf_h2c_msg cmd = {}; 2599 struct spdk_nvmf_ns ns = {}; 2600 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2601 struct spdk_bdev bdev = {}; 2602 struct spdk_nvmf_poll_group group = {}; 2603 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2604 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2605 struct spdk_io_channel io_ch = {}; 2606 int opc; 2607 2608 subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME; 2609 ns.bdev = &bdev; 2610 2611 subsystem.id = 0; 2612 subsystem.max_nsid = 1; 2613 subsys_ns[0] = &ns; 2614 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2615 2616 ctrlr.subsys = &subsystem; 2617 ctrlr.visible_ns = spdk_bit_array_create(1); 2618 spdk_bit_array_set(ctrlr.visible_ns, 0); 2619 2620 transport.opts.zcopy = true; 2621 2622 qpair.ctrlr = &ctrlr; 2623 qpair.group = &group; 2624 qpair.qid = 1; 2625 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2626 qpair.transport = &transport; 2627 2628 group.thread = spdk_get_thread(); 2629 group.num_sgroups = 1; 2630 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2631 sgroups.num_ns = 1; 2632 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2633 ns_info.channel = &io_ch; 2634 sgroups.ns_info = &ns_info; 2635 TAILQ_INIT(&sgroups.queued); 2636 group.sgroups = &sgroups; 2637 TAILQ_INIT(&qpair.outstanding); 2638 2639 req.qpair = &qpair; 2640 req.cmd = &cmd; 2641 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2642 2643 /* Admin queue */ 2644 qpair.qid = 0; 2645 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2646 qpair.qid = 1; 2647 2648 /* Invalid Opcodes */ 2649 for (opc = 0; opc <= 255; opc++) { 2650 cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc; 2651 if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) && 2652 (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) { 2653 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2654 } 2655 } 2656 cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE; 2657 2658 /* Fused WRITE */ 2659 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND; 2660 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2661 cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE; 2662 2663 /* Non bdev */ 2664 cmd.nvme_cmd.nsid = 4; 2665 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2666 cmd.nvme_cmd.nsid = 1; 2667 2668 /* ZCOPY Not supported */ 2669 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2670 ns.zcopy = true; 2671 2672 /* ZCOPY disabled on transport level */ 2673 transport.opts.zcopy = false; 2674 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false); 2675 transport.opts.zcopy = true; 2676 2677 /* Success */ 2678 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2679 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2680 2681 spdk_bit_array_free(&ctrlr.visible_ns); 2682 } 2683 2684 static void 2685 qpair_state_change_done(void *cb_arg, int status) 2686 { 2687 } 2688 2689 static void 2690 test_spdk_nvmf_request_zcopy_start(void) 2691 { 2692 struct spdk_nvmf_request req = {}; 2693 struct spdk_nvmf_qpair qpair = {}; 2694 struct spdk_nvmf_transport transport = {}; 2695 struct spdk_nvme_cmd cmd = {}; 2696 union nvmf_c2h_msg rsp = {}; 2697 struct spdk_nvmf_ctrlr ctrlr = {}; 2698 struct spdk_nvmf_subsystem subsystem = {}; 2699 struct spdk_nvmf_ns ns = {}; 2700 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2701 enum spdk_nvme_ana_state ana_state[1]; 2702 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2703 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2704 2705 struct spdk_nvmf_poll_group group = {}; 2706 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2707 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2708 struct spdk_io_channel io_ch = {}; 2709 2710 ns.bdev = &bdev; 2711 ns.zcopy = true; 2712 ns.anagrpid = 1; 2713 2714 subsystem.id = 0; 2715 subsystem.max_nsid = 1; 2716 subsys_ns[0] = &ns; 2717 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2718 2719 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2720 2721 /* Enable controller */ 2722 ctrlr.vcprop.cc.bits.en = 1; 2723 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2724 ctrlr.listener = &listener; 2725 ctrlr.visible_ns = spdk_bit_array_create(1); 2726 spdk_bit_array_set(ctrlr.visible_ns, 0); 2727 2728 transport.opts.zcopy = true; 2729 2730 group.thread = spdk_get_thread(); 2731 group.num_sgroups = 1; 2732 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2733 sgroups.num_ns = 1; 2734 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2735 ns_info.channel = &io_ch; 2736 sgroups.ns_info = &ns_info; 2737 TAILQ_INIT(&sgroups.queued); 2738 group.sgroups = &sgroups; 2739 TAILQ_INIT(&qpair.outstanding); 2740 2741 qpair.ctrlr = &ctrlr; 2742 qpair.group = &group; 2743 qpair.transport = &transport; 2744 qpair.qid = 1; 2745 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2746 2747 cmd.nsid = 1; 2748 2749 req.qpair = &qpair; 2750 req.cmd = (union nvmf_h2c_msg *)&cmd; 2751 req.rsp = &rsp; 2752 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2753 cmd.opc = SPDK_NVME_OPC_READ; 2754 2755 /* Fail because no controller */ 2756 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2757 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2758 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 2759 qpair.ctrlr = NULL; 2760 spdk_nvmf_request_zcopy_start(&req); 2761 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2762 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2763 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 2764 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2765 qpair.ctrlr = &ctrlr; 2766 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2767 2768 /* Fail because bad NSID */ 2769 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2770 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2771 cmd.nsid = 0; 2772 spdk_nvmf_request_zcopy_start(&req); 2773 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2774 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2775 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2776 cmd.nsid = 1; 2777 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2778 2779 /* Fail because bad Channel */ 2780 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2781 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2782 ns_info.channel = NULL; 2783 spdk_nvmf_request_zcopy_start(&req); 2784 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2785 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 2786 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT); 2787 ns_info.channel = &io_ch; 2788 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2789 2790 /* Queue the requet because NSID is not active */ 2791 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2792 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2793 ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING; 2794 spdk_nvmf_request_zcopy_start(&req); 2795 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT); 2796 CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req); 2797 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2798 TAILQ_REMOVE(&sgroups.queued, &req, link); 2799 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2800 2801 /* Fail because QPair is not active */ 2802 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2803 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2804 qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING; 2805 qpair.state_cb = qpair_state_change_done; 2806 spdk_nvmf_request_zcopy_start(&req); 2807 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED); 2808 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2809 qpair.state_cb = NULL; 2810 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2811 2812 /* Fail because nvmf_bdev_ctrlr_zcopy_start fails */ 2813 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2814 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2815 cmd.cdw10 = bdev.blockcnt; /* SLBA: CDW10 and CDW11 */ 2816 cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */ 2817 req.length = (cmd.cdw12 + 1) * bdev.blocklen; 2818 spdk_nvmf_request_zcopy_start(&req); 2819 CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED); 2820 cmd.cdw10 = 0; 2821 cmd.cdw12 = 0; 2822 req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 2823 2824 /* Success */ 2825 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2826 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2827 spdk_nvmf_request_zcopy_start(&req); 2828 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2829 2830 spdk_bit_array_free(&ctrlr.visible_ns); 2831 } 2832 2833 static void 2834 test_zcopy_read(void) 2835 { 2836 struct spdk_nvmf_request req = {}; 2837 struct spdk_nvmf_qpair qpair = {}; 2838 struct spdk_nvmf_transport transport = {}; 2839 struct spdk_nvme_cmd cmd = {}; 2840 union nvmf_c2h_msg rsp = {}; 2841 struct spdk_nvmf_ctrlr ctrlr = {}; 2842 struct spdk_nvmf_subsystem subsystem = {}; 2843 struct spdk_nvmf_ns ns = {}; 2844 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2845 enum spdk_nvme_ana_state ana_state[1]; 2846 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2847 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2848 2849 struct spdk_nvmf_poll_group group = {}; 2850 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2851 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2852 struct spdk_io_channel io_ch = {}; 2853 2854 ns.bdev = &bdev; 2855 ns.zcopy = true; 2856 ns.anagrpid = 1; 2857 2858 subsystem.id = 0; 2859 subsystem.max_nsid = 1; 2860 subsys_ns[0] = &ns; 2861 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2862 2863 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2864 2865 /* Enable controller */ 2866 ctrlr.vcprop.cc.bits.en = 1; 2867 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2868 ctrlr.listener = &listener; 2869 ctrlr.visible_ns = spdk_bit_array_create(1); 2870 spdk_bit_array_set(ctrlr.visible_ns, 0); 2871 2872 transport.opts.zcopy = true; 2873 2874 group.thread = spdk_get_thread(); 2875 group.num_sgroups = 1; 2876 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2877 sgroups.num_ns = 1; 2878 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2879 ns_info.channel = &io_ch; 2880 sgroups.ns_info = &ns_info; 2881 TAILQ_INIT(&sgroups.queued); 2882 group.sgroups = &sgroups; 2883 TAILQ_INIT(&qpair.outstanding); 2884 2885 qpair.ctrlr = &ctrlr; 2886 qpair.group = &group; 2887 qpair.transport = &transport; 2888 qpair.qid = 1; 2889 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2890 2891 cmd.nsid = 1; 2892 2893 req.qpair = &qpair; 2894 req.cmd = (union nvmf_h2c_msg *)&cmd; 2895 req.rsp = &rsp; 2896 cmd.opc = SPDK_NVME_OPC_READ; 2897 2898 /* Prepare for zcopy */ 2899 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2900 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2901 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2902 CU_ASSERT(ns_info.io_outstanding == 0); 2903 2904 /* Perform the zcopy start */ 2905 spdk_nvmf_request_zcopy_start(&req); 2906 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2907 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read); 2908 CU_ASSERT(qpair.outstanding.tqh_first == &req); 2909 CU_ASSERT(ns_info.io_outstanding == 1); 2910 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2911 2912 /* Perform the zcopy end */ 2913 spdk_nvmf_request_zcopy_end(&req, false); 2914 CU_ASSERT(req.zcopy_bdev_io == NULL); 2915 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 2916 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2917 CU_ASSERT(ns_info.io_outstanding == 0); 2918 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 2919 2920 spdk_bit_array_free(&ctrlr.visible_ns); 2921 } 2922 2923 static void 2924 test_zcopy_write(void) 2925 { 2926 struct spdk_nvmf_request req = {}; 2927 struct spdk_nvmf_qpair qpair = {}; 2928 struct spdk_nvmf_transport transport = {}; 2929 struct spdk_nvme_cmd cmd = {}; 2930 union nvmf_c2h_msg rsp = {}; 2931 struct spdk_nvmf_ctrlr ctrlr = {}; 2932 struct spdk_nvmf_subsystem subsystem = {}; 2933 struct spdk_nvmf_ns ns = {}; 2934 struct spdk_nvmf_ns *subsys_ns[1] = {}; 2935 enum spdk_nvme_ana_state ana_state[1]; 2936 struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state }; 2937 struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512}; 2938 2939 struct spdk_nvmf_poll_group group = {}; 2940 struct spdk_nvmf_subsystem_poll_group sgroups = {}; 2941 struct spdk_nvmf_subsystem_pg_ns_info ns_info = {}; 2942 struct spdk_io_channel io_ch = {}; 2943 2944 ns.bdev = &bdev; 2945 ns.zcopy = true; 2946 ns.anagrpid = 1; 2947 2948 subsystem.id = 0; 2949 subsystem.max_nsid = 1; 2950 subsys_ns[0] = &ns; 2951 subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns; 2952 2953 listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE; 2954 2955 /* Enable controller */ 2956 ctrlr.vcprop.cc.bits.en = 1; 2957 ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem; 2958 ctrlr.listener = &listener; 2959 ctrlr.visible_ns = spdk_bit_array_create(1); 2960 spdk_bit_array_set(ctrlr.visible_ns, 0); 2961 2962 transport.opts.zcopy = true; 2963 2964 group.thread = spdk_get_thread(); 2965 group.num_sgroups = 1; 2966 sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2967 sgroups.num_ns = 1; 2968 ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 2969 ns_info.channel = &io_ch; 2970 sgroups.ns_info = &ns_info; 2971 TAILQ_INIT(&sgroups.queued); 2972 group.sgroups = &sgroups; 2973 TAILQ_INIT(&qpair.outstanding); 2974 2975 qpair.ctrlr = &ctrlr; 2976 qpair.group = &group; 2977 qpair.transport = &transport; 2978 qpair.qid = 1; 2979 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 2980 2981 cmd.nsid = 1; 2982 2983 req.qpair = &qpair; 2984 req.cmd = (union nvmf_h2c_msg *)&cmd; 2985 req.rsp = &rsp; 2986 cmd.opc = SPDK_NVME_OPC_WRITE; 2987 2988 /* Prepare for zcopy */ 2989 CU_ASSERT(nvmf_ctrlr_use_zcopy(&req)); 2990 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT); 2991 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 2992 CU_ASSERT(ns_info.io_outstanding == 0); 2993 2994 /* Perform the zcopy start */ 2995 spdk_nvmf_request_zcopy_start(&req); 2996 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE); 2997 CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write); 2998 CU_ASSERT(qpair.outstanding.tqh_first == &req); 2999 CU_ASSERT(ns_info.io_outstanding == 1); 3000 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 3001 3002 /* Perform the zcopy end */ 3003 spdk_nvmf_request_zcopy_end(&req, true); 3004 CU_ASSERT(req.zcopy_bdev_io == NULL); 3005 CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE); 3006 CU_ASSERT(qpair.outstanding.tqh_first == NULL); 3007 CU_ASSERT(ns_info.io_outstanding == 0); 3008 CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status)); 3009 3010 spdk_bit_array_free(&ctrlr.visible_ns); 3011 } 3012 3013 static void 3014 test_nvmf_property_set(void) 3015 { 3016 int rc; 3017 struct spdk_nvmf_request req = {}; 3018 struct spdk_nvmf_qpair qpair = {}; 3019 struct spdk_nvmf_ctrlr ctrlr = {}; 3020 union nvmf_h2c_msg cmd = {}; 3021 union nvmf_c2h_msg rsp = {}; 3022 3023 req.qpair = &qpair; 3024 qpair.ctrlr = &ctrlr; 3025 req.cmd = &cmd; 3026 req.rsp = &rsp; 3027 3028 /* Invalid parameters */ 3029 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 3030 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs); 3031 3032 rc = nvmf_property_set(&req); 3033 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3034 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 3035 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 3036 3037 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms); 3038 3039 rc = nvmf_property_get(&req); 3040 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3041 CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC); 3042 CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM); 3043 3044 /* Set cc with same property size */ 3045 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 3046 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc); 3047 3048 rc = nvmf_property_set(&req); 3049 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3050 3051 /* Emulate cc data */ 3052 ctrlr.vcprop.cc.raw = 0xDEADBEEF; 3053 3054 rc = nvmf_property_get(&req); 3055 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3056 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF); 3057 3058 /* Set asq with different property size */ 3059 memset(req.rsp, 0, sizeof(union nvmf_c2h_msg)); 3060 cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4; 3061 cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq); 3062 3063 rc = nvmf_property_set(&req); 3064 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3065 3066 /* Emulate asq data */ 3067 ctrlr.vcprop.asq = 0xAADDADBEEF; 3068 3069 rc = nvmf_property_get(&req); 3070 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3071 CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF); 3072 } 3073 3074 static void 3075 test_nvmf_ctrlr_get_features_host_behavior_support(void) 3076 { 3077 int rc; 3078 struct spdk_nvmf_request req = {}; 3079 struct spdk_nvmf_qpair qpair = {}; 3080 struct spdk_nvmf_ctrlr ctrlr = {}; 3081 struct spdk_nvme_host_behavior behavior = {}; 3082 union nvmf_h2c_msg cmd = {}; 3083 union nvmf_c2h_msg rsp = {}; 3084 3085 qpair.ctrlr = &ctrlr; 3086 req.qpair = &qpair; 3087 req.cmd = &cmd; 3088 req.rsp = &rsp; 3089 3090 /* Invalid data */ 3091 req.length = sizeof(struct spdk_nvme_host_behavior); 3092 req.iovcnt = 0; 3093 3094 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3095 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3096 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3097 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3098 3099 /* Wrong structure length */ 3100 req.length = sizeof(struct spdk_nvme_host_behavior) - 1; 3101 SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length); 3102 3103 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3104 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3105 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3106 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3107 3108 /* Get Features Host Behavior Support Success */ 3109 req.length = sizeof(struct spdk_nvme_host_behavior); 3110 SPDK_IOV_ONE(req.iov, &req.iovcnt, &behavior, req.length); 3111 3112 ctrlr.acre_enabled = true; 3113 behavior.acre = false; 3114 3115 rc = nvmf_ctrlr_get_features_host_behavior_support(&req); 3116 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3117 CU_ASSERT(behavior.acre == true); 3118 } 3119 3120 static void 3121 test_nvmf_ctrlr_set_features_host_behavior_support(void) 3122 { 3123 int rc; 3124 struct spdk_nvmf_request req = {}; 3125 struct spdk_nvmf_qpair qpair = {}; 3126 struct spdk_nvmf_ctrlr ctrlr = {}; 3127 struct spdk_nvme_host_behavior host_behavior = {}; 3128 union nvmf_h2c_msg cmd = {}; 3129 union nvmf_c2h_msg rsp = {}; 3130 3131 qpair.ctrlr = &ctrlr; 3132 req.qpair = &qpair; 3133 req.cmd = &cmd; 3134 req.rsp = &rsp; 3135 req.iov[0].iov_base = &host_behavior; 3136 req.iov[0].iov_len = sizeof(host_behavior); 3137 3138 /* Invalid iovcnt */ 3139 req.iovcnt = 0; 3140 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3141 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3142 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3143 3144 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3145 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3146 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3147 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3148 3149 /* Invalid iov_len */ 3150 req.iovcnt = 1; 3151 req.iov[0].iov_len = 0; 3152 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3153 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3154 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3155 3156 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3157 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3158 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3159 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3160 3161 /* acre is false */ 3162 host_behavior.acre = 0; 3163 req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior); 3164 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3165 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3166 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3167 3168 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3169 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3170 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3171 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 3172 CU_ASSERT(ctrlr.acre_enabled == false); 3173 3174 /* acre is true */ 3175 host_behavior.acre = 1; 3176 req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior); 3177 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3178 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3179 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3180 3181 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3182 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3183 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3184 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 3185 CU_ASSERT(ctrlr.acre_enabled == true); 3186 3187 /* Invalid acre */ 3188 host_behavior.acre = 2; 3189 rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS; 3190 req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3191 req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS; 3192 3193 rc = nvmf_ctrlr_set_features_host_behavior_support(&req); 3194 CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE); 3195 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 3196 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD); 3197 } 3198 3199 static void 3200 test_nvmf_ctrlr_ns_attachment(void) 3201 { 3202 struct spdk_nvmf_subsystem subsystem = {}; 3203 struct spdk_nvmf_ns ns1 = { 3204 .nsid = 1, 3205 .always_visible = false 3206 }; 3207 struct spdk_nvmf_ns ns3 = { 3208 .nsid = 3, 3209 .always_visible = false 3210 }; 3211 struct spdk_nvmf_ctrlr ctrlrA = { 3212 .subsys = &subsystem 3213 }; 3214 struct spdk_nvmf_ctrlr ctrlrB = { 3215 .subsys = &subsystem 3216 }; 3217 struct spdk_nvmf_host *host; 3218 uint32_t nsid; 3219 3220 subsystem.max_nsid = 3; 3221 subsystem.ns = calloc(subsystem.max_nsid, sizeof(subsystem.ns)); 3222 SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL); 3223 3224 /* nsid = 2 -> unallocated, nsid = 1,3 -> allocated */ 3225 subsystem.ns[0] = &ns1; 3226 subsystem.ns[2] = &ns3; 3227 3228 snprintf(ctrlrA.hostnqn, sizeof(ctrlrA.hostnqn), "nqn.2016-06.io.spdk:host1"); 3229 ctrlrA.visible_ns = spdk_bit_array_create(subsystem.max_nsid); 3230 SPDK_CU_ASSERT_FATAL(ctrlrA.visible_ns != NULL); 3231 snprintf(ctrlrB.hostnqn, sizeof(ctrlrB.hostnqn), "nqn.2016-06.io.spdk:host2"); 3232 ctrlrB.visible_ns = spdk_bit_array_create(subsystem.max_nsid); 3233 SPDK_CU_ASSERT_FATAL(ctrlrB.visible_ns != NULL); 3234 3235 /* Do not auto attach and no cold attach of any ctrlr */ 3236 nsid = 1; 3237 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL); 3238 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3239 nvmf_ctrlr_init_visible_ns(&ctrlrA); 3240 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3241 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3242 nsid = 3; 3243 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3244 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3245 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL); 3246 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3247 3248 /* Cold attach ctrlrA to namespace 1 */ 3249 nsid = 1; 3250 host = calloc(1, sizeof(*host)); 3251 SPDK_CU_ASSERT_FATAL(host != NULL); 3252 snprintf(host->nqn, sizeof(host->nqn), "%s", ctrlrA.hostnqn); 3253 TAILQ_INSERT_HEAD(&ns1.hosts, host, link); 3254 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host); 3255 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3256 nvmf_ctrlr_init_visible_ns(&ctrlrA); 3257 CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3258 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3259 nsid = 3; 3260 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3261 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3262 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == host); 3263 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3264 3265 /* Detach ctrlrA from namespace 1 */ 3266 nsid = 1; 3267 spdk_bit_array_clear(ctrlrA.visible_ns, nsid - 1); 3268 TAILQ_REMOVE(&ns1.hosts, host, link); 3269 free(host); 3270 3271 /* Auto attach any ctrlr to namespace 2 */ 3272 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL); 3273 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3274 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3275 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3276 nsid = 3; 3277 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3278 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3279 ns1.always_visible = true; 3280 nvmf_ctrlr_init_visible_ns(&ctrlrA); 3281 nsid = 1; 3282 CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3283 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3284 nsid = 3; 3285 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3286 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3287 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL); 3288 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3289 nvmf_ctrlr_init_visible_ns(&ctrlrB); 3290 nsid = 1; 3291 CU_ASSERT(spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3292 CU_ASSERT(spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3293 nsid = 3; 3294 CU_ASSERT(!spdk_bit_array_get(ctrlrA.visible_ns, nsid - 1)); 3295 CU_ASSERT(!spdk_bit_array_get(ctrlrB.visible_ns, nsid - 1)); 3296 CU_ASSERT(nvmf_ns_find_host(&ns1, ctrlrA.hostnqn) == NULL); 3297 CU_ASSERT(nvmf_ns_find_host(&ns3, ctrlrA.hostnqn) == NULL); 3298 3299 free(ctrlrA.visible_ns); 3300 free(ctrlrB.visible_ns); 3301 free(subsystem.ns); 3302 } 3303 3304 static void 3305 test_nvmf_check_qpair_active(void) 3306 { 3307 union nvmf_c2h_msg rsp = {}; 3308 union nvmf_h2c_msg cmd = {}; 3309 struct spdk_nvmf_qpair qpair = { .outstanding = TAILQ_HEAD_INITIALIZER(qpair.outstanding) }; 3310 struct spdk_nvmf_request req = { .qpair = &qpair, .cmd = &cmd, .rsp = &rsp }; 3311 size_t i; 3312 3313 /* qpair is active */ 3314 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 3315 qpair.state = SPDK_NVMF_QPAIR_ENABLED; 3316 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true); 3317 3318 /* qpair is connecting - CONNECT is allowed */ 3319 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 3320 cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 3321 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 3322 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true); 3323 3324 /* qpair is connecting - other commands are disallowed */ 3325 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 3326 qpair.state = SPDK_NVMF_QPAIR_CONNECTING; 3327 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false); 3328 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 3329 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 3330 3331 /* qpair is authenticating - AUTHENTICATION_SEND is allowed */ 3332 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 3333 cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND; 3334 qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING; 3335 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true); 3336 3337 /* qpair is authenticating - AUTHENTICATION_RECV is allowed */ 3338 cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC; 3339 cmd.nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV; 3340 qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING; 3341 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), true); 3342 3343 /* qpair is authenticating - other commands are disallowed */ 3344 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 3345 qpair.state = SPDK_NVMF_QPAIR_AUTHENTICATING; 3346 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false); 3347 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_COMMAND_SPECIFIC); 3348 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVMF_FABRIC_SC_AUTH_REQUIRED); 3349 3350 /* qpair is in one of the other states - all commands are disallowed */ 3351 int disallowed_states[] = { 3352 SPDK_NVMF_QPAIR_UNINITIALIZED, 3353 SPDK_NVMF_QPAIR_DEACTIVATING, 3354 SPDK_NVMF_QPAIR_ERROR, 3355 }; 3356 qpair.state_cb = qpair_state_change_done; 3357 cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ; 3358 for (i = 0; i < SPDK_COUNTOF(disallowed_states); ++i) { 3359 qpair.state = disallowed_states[i]; 3360 CU_ASSERT_EQUAL(nvmf_check_qpair_active(&req), false); 3361 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC); 3362 CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR); 3363 } 3364 } 3365 3366 int 3367 main(int argc, char **argv) 3368 { 3369 CU_pSuite suite = NULL; 3370 unsigned int num_failures; 3371 3372 CU_initialize_registry(); 3373 3374 suite = CU_add_suite("nvmf", NULL, NULL); 3375 CU_ADD_TEST(suite, test_get_log_page); 3376 CU_ADD_TEST(suite, test_process_fabrics_cmd); 3377 CU_ADD_TEST(suite, test_connect); 3378 CU_ADD_TEST(suite, test_get_ns_id_desc_list); 3379 CU_ADD_TEST(suite, test_identify_ns); 3380 CU_ADD_TEST(suite, test_identify_ns_iocs_specific); 3381 CU_ADD_TEST(suite, test_reservation_write_exclusive); 3382 CU_ADD_TEST(suite, test_reservation_exclusive_access); 3383 CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs); 3384 CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs); 3385 CU_ADD_TEST(suite, test_reservation_notification_log_page); 3386 CU_ADD_TEST(suite, test_get_dif_ctx); 3387 CU_ADD_TEST(suite, test_set_get_features); 3388 CU_ADD_TEST(suite, test_identify_ctrlr); 3389 CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific); 3390 CU_ADD_TEST(suite, test_custom_admin_cmd); 3391 CU_ADD_TEST(suite, test_fused_compare_and_write); 3392 CU_ADD_TEST(suite, test_multi_async_event_reqs); 3393 CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp); 3394 CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp); 3395 CU_ADD_TEST(suite, test_multi_async_events); 3396 CU_ADD_TEST(suite, test_rae); 3397 CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct); 3398 CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy); 3399 CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start); 3400 CU_ADD_TEST(suite, test_zcopy_read); 3401 CU_ADD_TEST(suite, test_zcopy_write); 3402 CU_ADD_TEST(suite, test_nvmf_property_set); 3403 CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support); 3404 CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support); 3405 CU_ADD_TEST(suite, test_nvmf_ctrlr_ns_attachment); 3406 CU_ADD_TEST(suite, test_nvmf_check_qpair_active); 3407 3408 allocate_threads(1); 3409 set_thread(0); 3410 3411 num_failures = spdk_ut_run_tests(argc, argv, NULL); 3412 CU_cleanup_registry(); 3413 3414 free_threads(); 3415 3416 return num_failures; 3417 } 3418