/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/bdev_zone.h"
#include "spdk/nvme_spec.h"
#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
#include "nvmf/ctrlr.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
	uint32_t blocklen;
	bool zoned;
	uint32_t zone_size;
	uint32_t max_open_zones;
	uint32_t max_active_zones;
};

#define MAX_OPEN_ZONES 12
#define MAX_ACTIVE_ZONES 34
#define ZONE_SIZE 56

const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";

static struct spdk_bdev_io *zcopy_start_bdev_io_read = (struct spdk_bdev_io *) 0x1122334455667788UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_write = (struct spdk_bdev_io *)
		0x8877665544332211UL;
static struct spdk_bdev_io *zcopy_start_bdev_io_fail = (struct spdk_bdev_io *) 0xFFFFFFFFFFFFFFFFUL;

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_poll_group_create,
	    struct spdk_nvmf_poll_group *,
	    (struct spdk_nvmf_tgt *tgt),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_sn);

DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
	    const char *,
	    (const struct spdk_nvmf_subsystem *subsystem),
	    subsystem_default_mn);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

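/*
 * The DEFINE_STUB()/DEFINE_STUB_V() macros from spdk_internal/mock.h expand
 * into mock implementations that return the default value given as the last
 * argument. A test can override a stub's return value at runtime and restore
 * the default afterwards, e.g.:
 *
 *   MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
 *   ... exercise the code under test ...
 *   MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
 *
 * as test_connect() below does when a subsystem lookup needs to succeed.
 */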
DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	     struct spdk_dif_ctx *dif_ctx),
	    true);

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr, (struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t, (const struct spdk_bdev *bdev),
	    MAX_ACTIVE_ZONES);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t, (const struct spdk_bdev *bdev), MAX_OPEN_ZONES);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), ZONE_SIZE);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	return subsystem->ns[0];
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
				struct spdk_nvmf_ns *prev_ns)
{
	uint32_t nsid;

	SPDK_CU_ASSERT_FATAL(subsystem->ns != NULL);
	nsid = prev_ns->nsid;

	if (nsid >= subsystem->max_nsid) {
		return NULL;
	}
	for (nsid = nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
		if (subsystem->ns[nsid - 1]) {
			return subsystem->ns[nsid - 1];
		}
	}
	return NULL;
}

bool
nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
{
	return true;
}

int
nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
			    struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch,
			    struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;

	start_lba = from_le64(&req->cmd->nvme_cmd.cdw10);
	num_blocks = (from_le32(&req->cmd->nvme_cmd.cdw12) & 0xFFFFu) + 1;

	if ((start_lba + num_blocks) > bdev->blockcnt) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_write;
	} else if (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) {
		req->zcopy_bdev_io = zcopy_start_bdev_io_read;
	} else {
		req->zcopy_bdev_io = zcopy_start_bdev_io_fail;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

void
nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
{
	req->zcopy_bdev_io = NULL;
	spdk_nvmf_request_complete(req);
}

static void
test_get_log_page(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	char data[4096];

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.data = &data;
	req.length = sizeof(data);
	spdk_iov_one(req.iov, &req.iovcnt, &data, req.length);

	/* Get Log Page - all valid */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
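	/* numdl carries the low 16 bits of the zero-based dword count (NUMD),
	 * so for this fixture spdk_nvme_bytes_to_numd(4096) == 4096 / 4 - 1 = 1023. */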
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Get Log Page with invalid log ID */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10 = 0;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page with invalid offset (not dword aligned) */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	cmd.nvme_cmd.cdw12 = 2;
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Log Page without data buffer */
	memset(&cmd, 0, sizeof(cmd));
	memset(&rsp, 0, sizeof(rsp));
	req.data = NULL;
	req.iovcnt = 0;
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
	cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = spdk_nvme_bytes_to_numd(req.length);
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	req.data = data;
}

static void
test_process_fabrics_cmd(void)
{
	struct spdk_nvmf_request req = {};
	int ret;
	struct spdk_nvmf_qpair req_qpair = {};
	union nvmf_h2c_msg req_cmd = {};
	union nvmf_c2h_msg req_rsp = {};

	req.qpair = &req_qpair;
	req.cmd = &req_cmd;
	req.rsp = &req_rsp;
	req.qpair->ctrlr = NULL;

	/* No ctrlr and invalid command check */
	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	ret = nvmf_ctrlr_process_fabrics_cmd(&req);
	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

static bool
nvme_status_success(const struct spdk_nvme_status *status)
{
	return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
}

static void
test_connect(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data;
	struct spdk_nvmf_poll_group group;
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	struct spdk_nvmf_transport transport;
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_qpair admin_qpair;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_tgt tgt;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	int rc;

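	/* Fixture sketch: one admin qpair and one I/O qpair share a poll
	 * group. The transport allows max_qpairs_per_ctrlr = 3 (admin plus
	 * two I/O queues), and CONNECT's sqsize field is zero-based, so
	 * sqsize = 31 requests a 32-entry queue, matching max_aq_depth. */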
	memset(&group, 0, sizeof(group));
	group.thread = spdk_get_thread();

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.qpair_mask = spdk_bit_array_create(3);
	SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.vcprop.cc.bits.iosqes = 6;
	ctrlr.vcprop.cc.bits.iocqes = 4;

	memset(&admin_qpair, 0, sizeof(admin_qpair));
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	memset(&tgt, 0, sizeof(tgt));
	memset(&transport, 0, sizeof(transport));
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.tgt = &tgt;

	memset(&qpair, 0, sizeof(qpair));
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memset(&connect_data, 0, sizeof(connect_data));
	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	memset(&subsystem, 0, sizeof(subsystem));
	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
	group.sgroups = sgroups;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.length = sizeof(connect_data);
	spdk_iov_one(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
	MOCK_SET(spdk_nvmf_poll_group_create, &group);

	/* Valid admin connect command */
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* Valid admin connect command with kato = 0 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.kato = 120000;

	/* Invalid data length */
	memset(&rsp, 0, sizeof(rsp));
	req.length = sizeof(connect_data) - 1;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(qpair.ctrlr == NULL);
	req.length = sizeof(connect_data);

	/* Invalid recfmt */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.recfmt = 1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.recfmt = 0;

	/* Subsystem not found */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);

	/* Unterminated hostnqn */
	memset(&rsp, 0, sizeof(rsp));
	memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
	CU_ASSERT(qpair.ctrlr == NULL);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	/* Host not allowed */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
	CU_ASSERT(qpair.ctrlr == NULL);
	MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);

	/* Invalid sqsize == 0 */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 0;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid admin sqsize > max_aq_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.sqsize = 32;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.sqsize = 31;

	/* Invalid I/O sqsize > max_queue_depth */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 64;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
	CU_ASSERT(qpair.ctrlr == NULL);
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;

	/* Invalid cntlid for admin queue */
	memset(&rsp, 0, sizeof(rsp));
	connect_data.cntlid = 0x1234;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	connect_data.cntlid = 0xFFFF;

	ctrlr.admin_qpair = &admin_qpair;
	ctrlr.subsys = &subsystem;

	/* Valid I/O queue connect command */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.sqsize = 63;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.sqsize = 31;

	/* Non-existent controller */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);

	/* I/O connect to discovery controller */
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.kato = 120000;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;

	/* I/O connect to discovery controller with keep-alive-timeout == 0.
	 * Then, a fixed timeout value is set to keep-alive-timeout.
	 */
	cmd.connect_cmd.kato = 0;
	memset(&rsp, 0, sizeof(rsp));
	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.ctrlr != NULL);
	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
	free(qpair.ctrlr);
	qpair.ctrlr = NULL;
	cmd.connect_cmd.qid = 1;
	cmd.connect_cmd.kato = 120000;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	/* I/O connect to disabled controller */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.en = 0;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.en = 1;

	/* I/O connect with invalid IOSQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iosqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iosqes = 6;

	/* I/O connect with invalid IOCQES */
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cc.bits.iocqes = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	ctrlr.vcprop.cc.bits.iocqes = 4;

	/* I/O connect with qid that is too large */
	memset(&rsp, 0, sizeof(rsp));
	cmd.connect_cmd.qid = 3;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect with duplicate queue ID */
	memset(&rsp, 0, sizeof(rsp));
	spdk_bit_array_set(ctrlr.qpair_mask, 1);
	cmd.connect_cmd.qid = 1;
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);

	/* I/O connect when admin qpair is being destroyed */
	admin_qpair.group = NULL;
	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	memset(&rsp, 0, sizeof(rsp));
	sgroups[subsystem.id].mgmt_io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	rc = nvmf_ctrlr_cmd_connect(&req);
	poll_threads();
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
	CU_ASSERT(qpair.ctrlr == NULL);
	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
	admin_qpair.group = &group;
	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Clean up globals */
	MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
	MOCK_CLEAR(spdk_nvmf_poll_group_create);

	spdk_bit_array_free(&ctrlr.qpair_mask);
	free(sgroups);
}

static void
test_get_ns_id_desc_list(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);
	spdk_iov_one(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;

	/* Invalid NSID */
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* Valid NSID, but ns has no IDs defined */
	cmd.nvme_cmd.nsid = 1;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));

	/* Valid NSID, only EUI64 defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[13] == 0);

	/* Valid NSID, only NGUID defined */
	memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[1] == 16);
	CU_ASSERT(buf[4] == 0x22);
	CU_ASSERT(buf[19] == 0xEE);
	CU_ASSERT(buf[21] == 0);

	/* Valid NSID, both EUI64 and NGUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[33] == 0);

	/* Valid NSID, EUI64, NGUID, and UUID defined */
	ns.opts.eui64[0] = 0x11;
	ns.opts.eui64[7] = 0xFF;
	ns.opts.nguid[0] = 0x22;
	ns.opts.nguid[15] = 0xEE;
	ns.opts.uuid.u.raw[0] = 0x33;
	ns.opts.uuid.u.raw[15] = 0xDD;
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
	CU_ASSERT(buf[1] == 8);
	CU_ASSERT(buf[4] == 0x11);
	CU_ASSERT(buf[11] == 0xFF);
	CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
	CU_ASSERT(buf[13] == 16);
	CU_ASSERT(buf[16] == 0x22);
	CU_ASSERT(buf[31] == 0xEE);
	CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
	CU_ASSERT(buf[33] == 16);
	CU_ASSERT(buf[36] == 0x33);
	CU_ASSERT(buf[51] == 0xDD);
	CU_ASSERT(buf[53] == 0);
}

static void
test_identify_ns(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};

	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	/* Invalid NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 1234);

	/* Valid but inactive NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid NSID 3 */
	cmd.nsid = 3;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.nsze == 5678);

	/* Invalid NSID 4 */
	cmd.nsid = 4;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NSID 0xFFFFFFFF (NS management not supported) */
	cmd.nsid = 0xFFFFFFFF;
	memset(&nsdata, 0, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
					      &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

static void
test_identify_ns_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ns_data nsdata = {};
	struct spdk_bdev bdev[2] = {{.blockcnt = 1234, .zoned = true, .zone_size = ZONE_SIZE,
			.max_open_zones = MAX_OPEN_ZONES, .max_active_zones = MAX_ACTIVE_ZONES},
		{.blockcnt = 5678}};
	struct spdk_nvmf_ns ns[2] = {{.bdev = &bdev[0]}, {.bdev = &bdev[1]}};
	struct spdk_nvmf_ns *ns_arr[2] = {&ns[0], &ns[1]};

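	/* Per the ZNS spec, the MAR and MOR fields are zero-based, so for the
	 * zoned namespace below the identify data is expected to report
	 * MAX_ACTIVE_ZONES - 1 and MAX_OPEN_ZONES - 1. */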
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* Invalid ZNS NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Valid ZNS NSID 1 */
	cmd.nsid = 1;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(nsdata.ozcs.read_across_zone_boundaries == 1);
	CU_ASSERT(nsdata.mar == MAX_ACTIVE_ZONES - 1);
	CU_ASSERT(nsdata.mor == MAX_OPEN_ZONES - 1);
	CU_ASSERT(nsdata.lbafe[0].zsze == ZONE_SIZE);
	nsdata.ozcs.read_across_zone_boundaries = 0;
	nsdata.mar = 0;
	nsdata.mor = 0;
	nsdata.lbafe[0].zsze = 0;
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* Valid NVM NSID 2 */
	cmd.nsid = 2;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));

	/* Invalid NVM NSID 0 */
	cmd.nsid = 0;
	memset(&nsdata, 0xFF, sizeof(nsdata));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ns_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&nsdata, sizeof(nsdata)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
}

static void
test_set_get_features(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair admin_qpair = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem, .admin_qpair = &admin_qpair, .listener = &listener
	};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
	struct spdk_nvmf_request req;
	int rc;

	ns[0].anagrpid = 1;
	ns[2].anagrpid = 3;
	subsystem.ns = ns_arr;
	subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	listener.ana_state[2] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	admin_qpair.ctrlr = &ctrlr;
	req.qpair = &admin_qpair;
	cmd.nvme_cmd.nsid = 1;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
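	/* A non-NULL ptpl_file marks the namespace as capable of persisting
	 * reservations through power loss; that is what allows ptpl_activated
	 * to flip to true below even though the feature itself is reported as
	 * not saveable. */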
	ns[0].ptpl_file = "testcfg";
	rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
	CU_ASSERT(ns[0].ptpl_activated == true);

	/* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
	rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_get_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.nvme_cmd.cdw11 = 0x42;
	cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
	cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;

	rc = nvmf_ctrlr_set_features(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
}

/*
 * Reservation Unit Test Configuration
 *       --------             --------    --------
 *      | Host A |           | Host B |  | Host C |
 *       --------             --------    --------
 *      /        \               |           |
 *  --------   --------       -------     -------
 * |Ctrlr1_A| |Ctrlr2_A|     |Ctrlr_B|   |Ctrlr_C|
 *  --------   --------       -------     -------
 *    \           \              /           /
 *     \           \            /           /
 *      \           \          /           /
 *      --------------------------------------
 *     |            NAMESPACE 1               |
 *      --------------------------------------
 */

static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;

static void
ut_reservation_init(enum spdk_nvme_reservation_type rtype)
{
	/* Host A has two controllers */
	spdk_uuid_generate(&g_ctrlr1_A.hostid);
	spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);

	/* Host B has 1 controller */
	spdk_uuid_generate(&g_ctrlr_B.hostid);

	/* Host C has 1 controller */
	spdk_uuid_generate(&g_ctrlr_C.hostid);

	memset(&g_ns_info, 0, sizeof(g_ns_info));
	g_ns_info.rtype = rtype;
	g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
	g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
	g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
}

static void
test_reservation_write_exclusive(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
	ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Write command from Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
test_reservation_exclusive_access(void)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
	ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);

	/* Test Case: Issue a Reservation Release command from a valid Registrant */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
}

static void
_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Read command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Test Case: Issue a DSM Write command from Host A and Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host C */
	memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));

	/* Test Case: Read and Write commands from non-registrant Host C */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_write_exclusive_regs_only_and_all_regs(void)
{
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	_test_reservation_write_exclusive_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
}

static void
_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
{
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
	ut_reservation_init(rtype);
	g_ns_info.holder_id = g_ctrlr1_A.hostid;

	/* Test Case: Issue a Write command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Unregister Host B */
	memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));

	/* Test Case: Issue a Read command from Host B */
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
	rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
	SPDK_CU_ASSERT_FATAL(rc < 0);
	SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
}

static void
test_reservation_exclusive_access_regs_only_and_all_regs(void)
{
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
	_test_reservation_exclusive_access_regs_only_and_all_regs(
		SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}

static void
init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	STAILQ_INIT(&ctrlr->async_events);
}

static void
cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_async_event_completion *event, *event_tmp;

	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
		STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
		free(event);
	}
}

static int
num_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	int num = 0;
	struct spdk_nvmf_async_event_completion *event;

	STAILQ_FOREACH(event, &ctrlr->async_events, link) {
		num++;
	}
	return num;
}

static void
test_reservation_notification_log_page(void)
{
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ns ns;
	struct spdk_nvmf_request req = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_reservation_notification_log logs[3];
	struct iovec iov;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.thread = spdk_get_thread();
	TAILQ_INIT(&ctrlr.log_head);
	init_pending_async_events(&ctrlr);
	ns.nsid = 1;

	/* Test Case: Mask all the reservation notifications */
	ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
		  SPDK_NVME_RESERVATION_RELEASED_MASK |
		  SPDK_NVME_RESERVATION_PREEMPTED_MASK;
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_REGISTRATION_PREEMPTED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_RELEASED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));

	/* Test Case: Unmask all the reservation notifications,
	 * 3 log pages are generated, and AER was triggered.
	 */
	ns.mask = 0;
	ctrlr.num_avail_log_pages = 0;
	req.cmd = &cmd;
	req.rsp = &rsp;
	ctrlr.aer_req[0] = &req;
	ctrlr.nr_aer_reqs = 1;
	req.qpair = &qpair;
	TAILQ_INIT(&qpair.outstanding);
	qpair.ctrlr = NULL;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);

	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_REGISTRATION_PREEMPTED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_RELEASED);
	nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
					  SPDK_NVME_RESERVATION_PREEMPTED);
	poll_threads();
	event.raw = rsp.nvme_cpl.cdw0;
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
	SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
	SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);

	/* Test Case: Get Log Page to clear the log pages */
	iov.iov_base = &logs[0];
	iov.iov_len = sizeof(logs);
	nvmf_get_reservation_notification_log_page(&ctrlr, &iov, 1, 0, sizeof(logs), 0);
	SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);

	cleanup_pending_async_events(&ctrlr);
}

static void
test_get_dif_ctx(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *_ns = NULL;
	struct spdk_bdev bdev = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	ctrlr.subsys = &subsystem;

	qpair.ctrlr = &ctrlr;

	req.qpair = &qpair;
	req.cmd = &cmd;

	ns.bdev = &bdev;

	ctrlr.dif_insert_or_strip = false;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	ctrlr.dif_insert_or_strip = true;
	qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	qpair.qid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvme_cmd.nsid = 1;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);
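
	/* Attach the namespace so that nsid 1 resolves; the lookup still fails
	 * for FLUSH, and only succeeds once the opcode is switched to one that
	 * transfers block data (WRITE below).
	 */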
	subsystem.max_nsid = 1;
	subsystem.ns = &_ns;
	subsystem.ns[0] = &ns;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == false);

	cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;

	ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
	CU_ASSERT(ret == true);
}

static void
test_identify_ctrlr(void)
{
	struct spdk_nvmf_tgt tgt = {};
	struct spdk_nvmf_subsystem subsystem = {
		.subtype = SPDK_NVMF_SUBTYPE_NVME,
		.tgt = &tgt,
	};
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_transport transport = {
		.ops = &tops,
		.opts = {
			.in_capsule_data_size = 4096,
		},
	};
	struct spdk_nvmf_qpair admin_qpair = { .transport = &transport };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
	struct spdk_nvme_ctrlr_data cdata = {};
	uint32_t expected_ioccsz;

	nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);

	/* Check ioccsz, TCP transport */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, RDMA transport */
	tops.type = SPDK_NVME_TRANSPORT_RDMA;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);

	/* Check ioccsz, TCP transport with dif_insert_or_strip */
	tops.type = SPDK_NVME_TRANSPORT_TCP;
	ctrlr.dif_insert_or_strip = true;
	expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
}

static void
test_identify_ctrlr_iocs_specific(void)
{
	struct spdk_nvmf_subsystem subsystem = { .max_zone_append_size_kib = 0 };
	struct spdk_nvmf_registers vcprop = { .cap.bits.mpsmin = 0 };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .vcprop = vcprop };
	struct spdk_nvme_cmd cmd = {};
	struct spdk_nvme_cpl rsp = {};
	struct spdk_nvme_zns_ctrlr_data ctrlr_data = {};

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_ZNS;

	/* ZNS max_zone_append_size_kib no limit */
	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr_data.zasl == 0);
	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));

	/* ZNS max_zone_append_size_kib = 4096 */
	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
	memset(&rsp, 0, sizeof(rsp));
	subsystem.max_zone_append_size_kib = 4096;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr_data.zasl == 0);
	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));

	/* ZNS max_zone_append_size_kib = 60000 */
	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
	memset(&rsp, 0, sizeof(rsp));
	subsystem.max_zone_append_size_kib = 60000;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr_data.zasl == 3);
	ctrlr_data.zasl = 0;
	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));

	/* ZNS max_zone_append_size_kib = 60000; mpsmin = 2 */
	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
	memset(&rsp, 0, sizeof(rsp));
	ctrlr.vcprop.cap.bits.mpsmin = 2;
	subsystem.max_zone_append_size_kib = 60000;
	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr_data.zasl == 1);
	ctrlr_data.zasl = 0;
	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
	ctrlr.vcprop.cap.bits.mpsmin = 0;

	cmd.cdw11_bits.identify.csi = SPDK_NVME_CSI_NVM;

	/* NVM */
	memset(&ctrlr_data, 0xFF, sizeof(ctrlr_data));
	memset(&rsp, 0, sizeof(rsp));
	CU_ASSERT(spdk_nvmf_ctrlr_identify_iocs_specific(&ctrlr, &cmd, &rsp,
			&ctrlr_data, sizeof(ctrlr_data)) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(spdk_mem_all_zero(&ctrlr_data, sizeof(ctrlr_data)));
}

static int
custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
{
	req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static void
test_custom_admin_cmd(void)
{
	struct spdk_nvmf_subsystem subsystem;
	struct spdk_nvmf_qpair qpair;
	struct spdk_nvmf_ctrlr ctrlr;
	struct spdk_nvmf_request req;
	struct spdk_nvmf_ns *ns_ptrs[1];
	struct spdk_nvmf_ns ns;
	union nvmf_h2c_msg cmd;
	union nvmf_c2h_msg rsp;
	struct spdk_bdev bdev;
	uint8_t buf[4096];
	int rc;

	memset(&subsystem, 0, sizeof(subsystem));
	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	memset(&ns, 0, sizeof(ns));
	ns.opts.nsid = 1;
	ns.bdev = &bdev;

	memset(&qpair, 0, sizeof(qpair));
	qpair.ctrlr = &ctrlr;

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	memset(&req, 0, sizeof(req));
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
	req.data = buf;
	req.length = sizeof(buf);
	spdk_iov_one(req.iov, &req.iovcnt, &buf, req.length);

	memset(&cmd, 0, sizeof(cmd));
	cmd.nvme_cmd.opc = 0xc1;
	cmd.nvme_cmd.nsid = 0;
	memset(&rsp, 0, sizeof(rsp));

	spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
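
	/* 0xc1 is in the admin vendor-specific opcode range, so it should be
	 * dispatched to the custom handler registered above rather than the
	 * built-in admin command processing.
	 */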
	/* Ensure that our hdlr is being called */
	rc = nvmf_ctrlr_process_admin_cmd(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
}

static void
test_fused_compare_and_write(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;

	/* SUCCESS/SUCCESS */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req == NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Wrong sequence */
	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* Write as FUSE_FIRST (Wrong op code) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);

	/* Compare as FUSE_SECOND (Wrong op code) */
	cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(qpair.first_fused_req != NULL);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	cmd.opc = SPDK_NVME_OPC_COMPARE;

	spdk_nvmf_request_exec(&req);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(qpair.first_fused_req == NULL);
}

static void
test_multi_async_event_reqs(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[5] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[5] = {};
	union nvmf_c2h_msg rsp[5] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};

	int i;

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();

	for (i = 0; i < 5; i++) {
		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
		cmd[i].nvme_cmd.nsid = 1;
		cmd[i].nvme_cmd.cid = i;

		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];
		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
	}

	/* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */
	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
	}
	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);

	/* Exceeding SPDK_NVMF_MAX_ASYNC_EVENTS reports an error */
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);

	/* Test that the aer_reqs array stays contiguous when a req in the middle is aborted */
	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
	CU_ASSERT(ctrlr.aer_req[2] == &req[3]);

	CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
	CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
	CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
	CU_ASSERT(ctrlr.aer_req[2] == NULL);
	CU_ASSERT(ctrlr.nr_aer_reqs == 2);

	TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
	TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
}

static void
test_get_ana_log_page_one_ns_per_anagrp(void)
{
#define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))
#define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + 3 * UT_ANA_DESC_SIZE)
	uint32_t ana_group[3];
	struct spdk_nvmf_subsystem subsystem = { .ana_group = ana_group };
	struct spdk_nvmf_ctrlr ctrlr = {};
	enum spdk_nvme_ana_state ana_state[3];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_nvmf_ns ns[3];
	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], &ns[1], &ns[2]};
	uint64_t offset;
	uint32_t length;
	int i;
	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	struct iovec iov, iovs[2];
	struct spdk_nvme_ana_page *ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;

	subsystem.ns = ns_arr;
	subsystem.max_nsid = 3;
	for (i = 0; i < 3; i++) {
		subsystem.ana_group[i] = 1;
	}
	ctrlr.subsys = &subsystem;
	ctrlr.listener = &listener;

	for (i = 0; i < 3; i++) {
		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	}

	for (i = 0; i < 3; i++) {
		ns_arr[i]->nsid = i + 1;
		ns_arr[i]->anagrpid = i + 1;
	}

	/* create expected page */
	ana_hdr = (void *)&expected_page[0];
	ana_hdr->num_ana_group_desc = 3;
	ana_hdr->change_count = 0;

	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
	ana_desc = (void *)_ana_desc;
	offset = sizeof(struct spdk_nvme_ana_page);

	for (i = 0; i < 3; i++) {
		memset(ana_desc, 0, UT_ANA_DESC_SIZE);
		ana_desc->ana_group_id = ns_arr[i]->nsid;
		ana_desc->num_of_nsid = 1;
		ana_desc->change_count = 0;
		ana_desc->ana_state = ctrlr.listener->ana_state[i];
		ana_desc->nsid[0] = ns_arr[i]->nsid;
		memcpy(&expected_page[offset], ana_desc, UT_ANA_DESC_SIZE);
		offset += UT_ANA_DESC_SIZE;
	}

	/* read entire actual log page */
	offset = 0;
	while (offset < UT_ANA_LOG_PAGE_SIZE) {
		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
		iov.iov_base = &actual_page[offset];
		iov.iov_len = length;
		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
		offset += length;
	}

	/* compare expected page and actual page */
	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
	offset = 0;
	iovs[0].iov_base = &actual_page[offset];
	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
	offset += UT_ANA_LOG_PAGE_SIZE - UT_ANA_DESC_SIZE + 4;
	iovs[1].iov_base = &actual_page[offset];
	iovs[1].iov_len = UT_ANA_LOG_PAGE_SIZE - offset;
	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);

	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

#undef UT_ANA_DESC_SIZE
#undef UT_ANA_LOG_PAGE_SIZE
}

static void
test_get_ana_log_page_multi_ns_per_anagrp(void)
{
#define UT_ANA_LOG_PAGE_SIZE (sizeof(struct spdk_nvme_ana_page) + \
			      sizeof(struct spdk_nvme_ana_group_descriptor) * 2 + \
			      sizeof(uint32_t) * 5)
	struct spdk_nvmf_ns ns[5];
	struct spdk_nvmf_ns *ns_arr[5] = {&ns[0], &ns[1], &ns[2], &ns[3], &ns[4]};
	uint32_t ana_group[5] = {0};
	struct spdk_nvmf_subsystem subsystem = { .ns = ns_arr, .ana_group = ana_group, };
	enum spdk_nvme_ana_state ana_state[5];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state, };
	struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .listener = &listener, };
	char expected_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	char actual_page[UT_ANA_LOG_PAGE_SIZE] = {0};
	struct iovec iov, iovs[2];
	struct spdk_nvme_ana_page *ana_hdr;
	char _ana_desc[UT_ANA_LOG_PAGE_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	uint64_t offset;
	uint32_t length;
	int i;

	subsystem.max_nsid = 5;
	subsystem.ana_group[1] = 3;
	subsystem.ana_group[2] = 2;
	for (i = 0; i < 5; i++) {
		listener.ana_state[i] = SPDK_NVME_ANA_OPTIMIZED_STATE;
	}

	for (i = 0; i < 5; i++) {
		ns_arr[i]->nsid = i + 1;
	}
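
	/* nsids 1, 3 and 5 are placed in ANA group 2 and nsids 2 and 4 in group 3,
	 * so the expected page below carries two descriptors with three and two
	 * nsids respectively.
	 */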
	ns_arr[0]->anagrpid = 2;
	ns_arr[1]->anagrpid = 3;
	ns_arr[2]->anagrpid = 2;
	ns_arr[3]->anagrpid = 3;
	ns_arr[4]->anagrpid = 2;

	/* create expected page */
	ana_hdr = (void *)&expected_page[0];
	ana_hdr->num_ana_group_desc = 2;
	ana_hdr->change_count = 0;

	/* The descriptor may be unaligned, so build it in a local buffer and then copy it into place. */
	ana_desc = (void *)_ana_desc;
	offset = sizeof(struct spdk_nvme_ana_page);

	memset(_ana_desc, 0, sizeof(_ana_desc));
	ana_desc->ana_group_id = 2;
	ana_desc->num_of_nsid = 3;
	ana_desc->change_count = 0;
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->nsid[0] = 1;
	ana_desc->nsid[1] = 3;
	ana_desc->nsid[2] = 5;
	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
	       sizeof(uint32_t) * 3);
	offset += sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t) * 3;

	memset(_ana_desc, 0, sizeof(_ana_desc));
	ana_desc->ana_group_id = 3;
	ana_desc->num_of_nsid = 2;
	ana_desc->change_count = 0;
	ana_desc->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ana_desc->nsid[0] = 2;
	ana_desc->nsid[1] = 4;
	memcpy(&expected_page[offset], ana_desc, sizeof(struct spdk_nvme_ana_group_descriptor) +
	       sizeof(uint32_t) * 2);

	/* read entire actual log page, and compare expected page and actual page. */
	offset = 0;
	while (offset < UT_ANA_LOG_PAGE_SIZE) {
		length = spdk_min(16, UT_ANA_LOG_PAGE_SIZE - offset);
		iov.iov_base = &actual_page[offset];
		iov.iov_len = length;
		nvmf_get_ana_log_page(&ctrlr, &iov, 1, offset, length, 0);
		offset += length;
	}

	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

	memset(&actual_page[0], 0, UT_ANA_LOG_PAGE_SIZE);
	offset = 0;
	iovs[0].iov_base = &actual_page[offset];
	iovs[0].iov_len = UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
	offset += UT_ANA_LOG_PAGE_SIZE - sizeof(uint32_t) * 5;
	iovs[1].iov_base = &actual_page[offset];
	iovs[1].iov_len = sizeof(uint32_t) * 5;
	nvmf_get_ana_log_page(&ctrlr, &iovs[0], 2, 0, UT_ANA_LOG_PAGE_SIZE, 0);

	CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);

#undef UT_ANA_LOG_PAGE_SIZE
}

static void
test_multi_async_events(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[4] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[4] = {};
	union nvmf_c2h_msg rsp[4] = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	int i;

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();
	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
	ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
	ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
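	/* All three notice types are enabled; the notices below are queued while
	 * no AER is outstanding and are then drained by the first three AER
	 * commands, leaving the fourth AER pending.
	 */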
	init_pending_async_events(&ctrlr);

	/* Target queues pending events when there is no outstanding AER request */
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
	nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);

	for (i = 0; i < 4; i++) {
		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
		cmd[i].nvme_cmd.nsid = 1;
		cmd[i].nvme_cmd.cid = i;

		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];

		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);

		sgroups.mgmt_io_outstanding = 1;
		if (i < 3) {
			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
		} else {
			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
		}
	}

	event.raw = rsp[0].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
	event.raw = rsp[1].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
	event.raw = rsp[2].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);

	cleanup_pending_async_events(&ctrlr);
}

static void
test_rae(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_request req[3] = {};
	struct spdk_nvmf_ns *ns_ptrs[1] = {};
	struct spdk_nvmf_ns ns = {};
	union nvmf_h2c_msg cmd[3] = {};
	union nvmf_c2h_msg rsp[3] = {};
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	int i;
	char data[4096];

	ns_ptrs[0] = &ns;
	subsystem.ns = ns_ptrs;
	subsystem.max_nsid = 1;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;

	ns.opts.nsid = 1;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	TAILQ_INIT(&qpair.outstanding);

	ctrlr.subsys = &subsystem;
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.thread = spdk_get_thread();
	ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
	init_pending_async_events(&ctrlr);

	/* Target queues pending events when there is no outstanding AER request */
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	/* Only one event will be queued before RAE is cleared */
	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);

	req[0].qpair = &qpair;
	req[0].cmd = &cmd[0];
	req[0].rsp = &rsp[0];
	cmd[0].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
	cmd[0].nvme_cmd.nsid = 1;
	cmd[0].nvme_cmd.cid = 0;

	for (i = 1; i < 3; i++) {
		req[i].qpair = &qpair;
		req[i].cmd = &cmd[i];
		req[i].rsp = &rsp[i];
		req[i].data = &data;
		req[i].length = sizeof(data);
		spdk_iov_one(req[i].iov, &req[i].iovcnt, &data, req[i].length);

		cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
		cmd[i].nvme_cmd.cdw10_bits.get_log_page.lid =
			SPDK_NVME_LOG_CHANGED_NS_LIST;
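		/* NUMD is a 0's-based dword count, so the transfer length is
		 * converted with spdk_nvme_bytes_to_numd().
		 */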
		cmd[i].nvme_cmd.cdw10_bits.get_log_page.numdl =
			spdk_nvme_bytes_to_numd(req[i].length);
		cmd[i].nvme_cmd.cid = i;
	}
	cmd[1].nvme_cmd.cdw10_bits.get_log_page.rae = 1;
	cmd[2].nvme_cmd.cdw10_bits.get_log_page.rae = 0;

	/* consume the pending event */
	TAILQ_INSERT_TAIL(&qpair.outstanding, &req[0], link);
	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[0]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	event.raw = rsp[0].nvme_cpl.cdw0;
	CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);

	/* get log with RAE set */
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[1]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp[1].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp[1].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* no new event will be generated until RAE is cleared */
	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	CU_ASSERT(num_pending_async_events(&ctrlr) == 0);

	/* get log with RAE clear */
	CU_ASSERT(nvmf_ctrlr_get_log_page(&req[2]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp[2].nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp[2].nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	nvmf_ctrlr_async_event_ns_notice(&ctrlr);
	CU_ASSERT(num_pending_async_events(&ctrlr) == 1);

	cleanup_pending_async_events(&ctrlr);
}

static void
test_nvmf_ctrlr_create_destruct(void)
{
	struct spdk_nvmf_fabric_connect_data connect_data = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups[2] = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_transport_ops tops = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr *ctrlr = NULL;
	struct spdk_nvmf_tgt tgt = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	const uint8_t hostid[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
	};
	const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";

	group.thread = spdk_get_thread();
	transport.ops = &tops;
	transport.opts.max_aq_depth = 32;
	transport.opts.max_queue_depth = 64;
	transport.opts.max_qpairs_per_ctrlr = 3;
	transport.opts.dif_insert_or_strip = true;
	transport.tgt = &tgt;
	qpair.transport = &transport;
	qpair.group = &group;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	TAILQ_INIT(&qpair.outstanding);

	memcpy(connect_data.hostid, hostid, sizeof(hostid));
	connect_data.cntlid = 0xFFFF;
	snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
	snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);

	subsystem.thread = spdk_get_thread();
	subsystem.id = 1;
	TAILQ_INIT(&subsystem.ctrlrs);
	subsystem.tgt = &tgt;
	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	group.sgroups = sgroups;

	cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.connect_cmd.cid = 1;
	cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	cmd.connect_cmd.recfmt = 0;
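	/* qid 0 selects the admin queue; sqsize is 0's based, so 31 requests a
	 * 32-entry queue, matching max_aq_depth above.
	 */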
	cmd.connect_cmd.qid = 0;
	cmd.connect_cmd.sqsize = 31;
	cmd.connect_cmd.cattr = 0;
	cmd.connect_cmd.kato = 120000;

	req.qpair = &qpair;
	req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.data = &connect_data;
	req.length = sizeof(connect_data);
	spdk_iov_one(req.iov, &req.iovcnt, &connect_data, req.length);
	req.cmd = &cmd;
	req.rsp = &rsp;

	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
	sgroups[subsystem.id].mgmt_io_outstanding++;

	ctrlr = nvmf_ctrlr_create(&subsystem, &req, &req.cmd->connect_cmd, req.iov[0].iov_base);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
	CU_ASSERT(req.qpair->ctrlr == ctrlr);
	CU_ASSERT(ctrlr->subsys == &subsystem);
	CU_ASSERT(ctrlr->thread == req.qpair->group->thread);
	CU_ASSERT(ctrlr->disconnect_in_progress == false);
	CU_ASSERT(ctrlr->qpair_mask != NULL);
	CU_ASSERT(ctrlr->feat.keep_alive_timer.bits.kato == 120000);
	CU_ASSERT(ctrlr->feat.async_event_configuration.bits.ns_attr_notice == 1);
	CU_ASSERT(ctrlr->feat.volatile_write_cache.bits.wce == 1);
	CU_ASSERT(ctrlr->feat.number_of_queues.bits.ncqr == 1);
	CU_ASSERT(ctrlr->feat.number_of_queues.bits.nsqr == 1);
	CU_ASSERT(!strncmp((void *)&ctrlr->hostid, hostid, 16));
	CU_ASSERT(ctrlr->vcprop.cap.bits.cqr == 1);
	CU_ASSERT(ctrlr->vcprop.cap.bits.mqes == 63);
	CU_ASSERT(ctrlr->vcprop.cap.bits.ams == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.to == NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500);
	CU_ASSERT(ctrlr->vcprop.cap.bits.dstrd == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.css == SPDK_NVME_CAP_CSS_NVM);
	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmin == 0);
	CU_ASSERT(ctrlr->vcprop.cap.bits.mpsmax == 0);
	CU_ASSERT(ctrlr->vcprop.vs.bits.mjr == 1);
	CU_ASSERT(ctrlr->vcprop.vs.bits.mnr == 3);
	CU_ASSERT(ctrlr->vcprop.vs.bits.ter == 0);
	CU_ASSERT(ctrlr->vcprop.cc.raw == 0);
	CU_ASSERT(ctrlr->vcprop.cc.bits.en == 0);
	CU_ASSERT(ctrlr->vcprop.csts.raw == 0);
	CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
	CU_ASSERT(ctrlr->dif_insert_or_strip == true);

	ctrlr->in_destruct = true;
	nvmf_ctrlr_destruct(ctrlr);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));
	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding));
}

static void
test_nvmf_ctrlr_use_zcopy(void)
{
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	struct spdk_bdev bdev = {};
	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};
	int opc;

	subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	ctrlr.subsys = &subsystem;

	transport.opts.zcopy = true;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	qpair.transport = &transport;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	req.qpair = &qpair;
	req.cmd = &cmd;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Admin queue */
	qpair.qid = 0;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	qpair.qid = 1;

	/* Invalid Opcodes */
	for (opc = 0; opc <= 255; opc++) {
		cmd.nvme_cmd.opc = (enum spdk_nvme_nvm_opcode) opc;
		if ((cmd.nvme_cmd.opc != SPDK_NVME_OPC_READ) &&
		    (cmd.nvme_cmd.opc != SPDK_NVME_OPC_WRITE)) {
			CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
		}
	}
	cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* Fused WRITE */
	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	cmd.nvme_cmd.fuse = SPDK_NVME_CMD_FUSE_NONE;

	/* Non bdev */
	cmd.nvme_cmd.nsid = 4;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	cmd.nvme_cmd.nsid = 1;

	/* ZCOPY Not supported */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	ns.zcopy = true;

	/* ZCOPY disabled on transport level */
	transport.opts.zcopy = false;
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
	transport.opts.zcopy = true;

	/* Success */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
}

static void
qpair_state_change_done(void *cb_arg, int status)
{
}

static void
test_spdk_nvmf_request_zcopy_start(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512 };

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	transport.opts.zcopy = true;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.transport = &transport;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
	cmd.opc = SPDK_NVME_OPC_READ;

	/* Fail because no controller */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	qpair.ctrlr = NULL;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
	qpair.ctrlr = &ctrlr;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because bad NSID */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	cmd.nsid = 0;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	cmd.nsid = 1;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because bad Channel */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	ns_info.channel = NULL;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(rsp.nvme_cpl.status.sc, SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	ns_info.channel = &io_ch;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Queue the request because NSID is not active */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	ns_info.state = SPDK_NVMF_SUBSYSTEM_PAUSING;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT_EQUAL(TAILQ_FIRST(&sgroups.queued), &req);
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	TAILQ_REMOVE(&sgroups.queued, &req, link);
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because QPair is not active */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
	qpair.state_cb = qpair_state_change_done;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT_FAILED);
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	qpair.state_cb = NULL;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Fail because nvmf_bdev_ctrlr_zcopy_start fails */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	cmd.cdw10 = bdev.blockcnt;	/* SLBA: CDW10 and CDW11 */
	cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	req.length = (cmd.cdw12 + 1) * bdev.blocklen;
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT_EQUAL(req.zcopy_phase, NVMF_ZCOPY_PHASE_INIT_FAILED);
	cmd.cdw10 = 0;
	cmd.cdw12 = 0;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	/* Success */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
}

static void
test_zcopy_read(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512 };

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};

	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	transport.opts.zcopy = true;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.transport = &transport;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	cmd.opc = SPDK_NVME_OPC_READ;

	/* Prepare for zcopy */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);

	/* Perform the zcopy start */
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_read);
	CU_ASSERT(qpair.outstanding.tqh_first == &req);
	CU_ASSERT(ns_info.io_outstanding == 1);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Perform the zcopy end */
	spdk_nvmf_request_zcopy_end(&req, false);
	CU_ASSERT(req.zcopy_bdev_io == NULL);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
}

static void
test_zcopy_write(void)
{
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvme_cmd cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};
	enum spdk_nvme_ana_state ana_state[1];
	struct spdk_nvmf_subsystem_listener listener = { .ana_state = ana_state };
	struct spdk_bdev bdev = { .blockcnt = 100, .blocklen = 512 };

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
	struct spdk_io_channel io_ch = {};
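
	/* The setup mirrors test_zcopy_read; the differences are the WRITE opcode
	 * and the commit flag passed to spdk_nvmf_request_zcopy_end().
	 */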
	ns.bdev = &bdev;
	ns.zcopy = true;
	ns.anagrpid = 1;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	listener.ana_state[0] = SPDK_NVME_ANA_OPTIMIZED_STATE;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
	ctrlr.listener = &listener;

	transport.opts.zcopy = true;

	group.thread = spdk_get_thread();
	group.num_sgroups = 1;
	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	sgroups.num_ns = 1;
	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
	ns_info.channel = &io_ch;
	sgroups.ns_info = &ns_info;
	TAILQ_INIT(&sgroups.queued);
	group.sgroups = &sgroups;
	TAILQ_INIT(&qpair.outstanding);

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;
	qpair.transport = &transport;
	qpair.qid = 1;
	qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	cmd.nsid = 1;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	cmd.opc = SPDK_NVME_OPC_WRITE;

	/* Prepare for zcopy */
	CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_INIT);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);

	/* Perform the zcopy start */
	spdk_nvmf_request_zcopy_start(&req);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_EXECUTE);
	CU_ASSERT(req.zcopy_bdev_io == zcopy_start_bdev_io_write);
	CU_ASSERT(qpair.outstanding.tqh_first == &req);
	CU_ASSERT(ns_info.io_outstanding == 1);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));

	/* Perform the zcopy end */
	spdk_nvmf_request_zcopy_end(&req, true);
	CU_ASSERT(req.zcopy_bdev_io == NULL);
	CU_ASSERT(req.zcopy_phase == NVMF_ZCOPY_PHASE_COMPLETE);
	CU_ASSERT(qpair.outstanding.tqh_first == NULL);
	CU_ASSERT(ns_info.io_outstanding == 0);
	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
}

static void
test_nvmf_property_set(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	req.qpair = &qpair;
	qpair.ctrlr = &ctrlr;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Invalid parameters */
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, vs);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);

	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, intms);

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);

	/* Set cc with same property size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate cc data */
	ctrlr.vcprop.cc.raw = 0xDEADBEEF;
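
	/* A property get of cc should now return the emulated raw value. */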
	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDEADBEEF);

	/* Set asq with different property size */
	memset(req.rsp, 0, sizeof(union nvmf_c2h_msg));
	cmd.prop_set_cmd.attrib.size = SPDK_NVMF_PROP_SIZE_4;
	cmd.prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, asq);

	rc = nvmf_property_set(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);

	/* Emulate asq data; a 4-byte get of the 8-byte asq register returns only the low dword */
	ctrlr.vcprop.asq = 0xAADDADBEEF;

	rc = nvmf_property_get(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->prop_get_rsp.value.u64 == 0xDDADBEEF);
}

static void
test_nvmf_ctrlr_get_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Invalid data */
	req.data = NULL;
	req.length = sizeof(struct spdk_nvme_host_behavior);
	req.iovcnt = 0;

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
	CU_ASSERT(req.data == NULL);

	/* Wrong structure length */
	req.data = &behavior;
	req.length = sizeof(struct spdk_nvme_host_behavior) - 1;
	spdk_iov_one(req.iov, &req.iovcnt, &behavior, req.length);

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Get Features Host Behavior Support Success */
	req.data = &behavior;
	req.length = sizeof(struct spdk_nvme_host_behavior);
	spdk_iov_one(req.iov, &req.iovcnt, &behavior, req.length);

	ctrlr.acre_enabled = true;
	behavior.acre = false;

	rc = nvmf_ctrlr_get_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(behavior.acre == true);
}

static void
test_nvmf_ctrlr_set_features_host_behavior_support(void)
{
	int rc;
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvme_host_behavior host_behavior = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};

	qpair.ctrlr = &ctrlr;
	req.qpair = &qpair;
	req.cmd = &cmd;
	req.rsp = &rsp;
	req.iov[0].iov_base = &host_behavior;
	req.iov[0].iov_len = sizeof(host_behavior);

	/* Invalid iovcnt */
	req.iovcnt = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Invalid iov_len */
	req.iovcnt = 1;
	req.iov[0].iov_len = 0;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* acre is false */
	host_behavior.acre = 0;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == false);

	/* acre is true */
	host_behavior.acre = 1;
	req.iov[0].iov_len = sizeof(struct spdk_nvme_host_behavior);
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(ctrlr.acre_enabled == true);

	/* Invalid acre */
	host_behavior.acre = 2;
	rc = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;

	rc = nvmf_ctrlr_set_features_host_behavior_support(&req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);
	CU_ADD_TEST(suite, test_get_log_page);
	CU_ADD_TEST(suite, test_process_fabrics_cmd);
	CU_ADD_TEST(suite, test_connect);
	CU_ADD_TEST(suite, test_get_ns_id_desc_list);
	CU_ADD_TEST(suite, test_identify_ns);
	CU_ADD_TEST(suite, test_identify_ns_iocs_specific);
	CU_ADD_TEST(suite, test_reservation_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_exclusive_access);
	CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
	CU_ADD_TEST(suite, test_reservation_notification_log_page);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_set_get_features);
	CU_ADD_TEST(suite, test_identify_ctrlr);
	CU_ADD_TEST(suite, test_identify_ctrlr_iocs_specific);
	CU_ADD_TEST(suite, test_custom_admin_cmd);
	CU_ADD_TEST(suite, test_fused_compare_and_write);
	CU_ADD_TEST(suite, test_multi_async_event_reqs);
	CU_ADD_TEST(suite, test_get_ana_log_page_one_ns_per_anagrp);
	CU_ADD_TEST(suite, test_get_ana_log_page_multi_ns_per_anagrp);
	CU_ADD_TEST(suite, test_multi_async_events);
	CU_ADD_TEST(suite, test_rae);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_create_destruct);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_use_zcopy);
	CU_ADD_TEST(suite, test_spdk_nvmf_request_zcopy_start);
	CU_ADD_TEST(suite, test_zcopy_read);
	CU_ADD_TEST(suite, test_zcopy_write);
	CU_ADD_TEST(suite, test_nvmf_property_set);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_get_features_host_behavior_support);
	CU_ADD_TEST(suite, test_nvmf_ctrlr_set_features_host_behavior_support);

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}