/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"

#include "nvmf/ctrlr_bdev.c"

#include "spdk/bdev_module.h"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);

DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");

DEFINE_STUB(spdk_bdev_get_physical_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 4096);

DEFINE_STUB(nvmf_ctrlr_process_admin_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB(spdk_bdev_comparev_blocks, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_nvme_admin_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_abort, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_io_get_iovec,
	      (struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp));
DEFINE_STUB(spdk_bdev_get_write_unit_size, uint32_t, (const struct spdk_bdev *bdev), 1);

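/*
 * The DEFINE_STUB()ed bdev and nvmf calls above (and below) return canned
 * values that individual tests override with MOCK_SET() and restore with
 * MOCK_CLEAR().  The accessors that follow get small real implementations
 * instead, because the code under test reads fields of the struct spdk_bdev
 * that each test fills in.
 */
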
uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
	return bdev->optimal_io_boundary;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

bool
spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && bdev->md_interleave;
}

/* We have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_dif_type spdk_dif_type_t;

spdk_dif_type_t
spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
{
	if (bdev->md_len != 0) {
		return bdev->dif_type;
	} else {
		return SPDK_DIF_DISABLE;
	}
}

bool
spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
		return bdev->dif_is_head_of_md;
	} else {
		return false;
	}
}

uint32_t
spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
{
	if (spdk_bdev_is_md_interleaved(bdev)) {
		return bdev->blocklen - bdev->md_len;
	} else {
		return bdev->blocklen;
	}
}

uint16_t
spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
{
	return bdev->acwu;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

uint64_t
spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
{
	return bdev->blockcnt;
}

DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *compare_iov, int compare_iovcnt,
	     struct iovec *write_iov, int write_iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
		uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));

DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
	    (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);

DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
	    (struct spdk_bdev_desc *desc), NULL);

DEFINE_STUB(spdk_bdev_flush_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry),
	    0);

DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_nvme_io_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

DEFINE_STUB(spdk_bdev_zcopy_start, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     bool populate,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_zcopy_end, int,
	    (struct spdk_bdev_io *bdev_io, bool commit,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_copy_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t dst_offset_blocks, uint64_t src_offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_get_max_copy, uint32_t, (const struct spdk_bdev *bdev), 0);

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
{
	abort();
	return NULL;
}

int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint64_t guard_seed, struct spdk_dif_ctx_init_ext_opts *opts)
{
	ctx->dif_pi_format = opts->dif_pi_format;
	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->init_ref_tag = init_ref_tag;

	return 0;
}

static uint32_t g_bdev_nvme_status_cdw0;
static uint32_t g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
static uint32_t g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;

static void
reset_bdev_nvme_status(void)
{
	g_bdev_nvme_status_cdw0 = 0;
	g_bdev_nvme_status_sct = SPDK_NVME_SCT_GENERIC;
	g_bdev_nvme_status_sc = SPDK_NVME_SC_SUCCESS;
}

void
spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct,
			     int *sc)
{
	*cdw0 = g_bdev_nvme_status_cdw0;
	*sct = g_bdev_nvme_status_sct;
	*sc = g_bdev_nvme_status_sc;
}

static void
test_get_rw_params(void)
{
	struct spdk_nvme_cmd cmd = {0};
	uint64_t lba;
	uint64_t count;

	lba = 0;
	count = 0;
	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
	nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
	CU_ASSERT(lba == 0x1234567890ABCDEF);
	CU_ASSERT(count == 0x9875 + 1);	/* NOTE: this field is 0's based, hence the +1 */
}

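/*
 * A sketch of what the helper under test is expected to check (assuming the
 * usual SPDK implementation): nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks,
 * io_start_lba, io_num_blocks) should return true only when
 * io_start_lba + io_num_blocks <= bdev_num_blocks and the addition did not
 * wrap around uint64_t.  The overflow cases below exercise the wrap check.
 */
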
static void
test_lba_in_range(void)
{
	/* Trivial cases (no overflow) */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);

	/* Overflow edge cases */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
}

static void
test_get_dif_ctx(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	bdev.md_len = 0;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == false);

	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	bdev.blocklen = 520;
	bdev.md_len = 8;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == true);
	CU_ASSERT(dif_ctx.block_size == 520);
	CU_ASSERT(dif_ctx.md_size == 8);
	CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
}

static void
test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request cmp_req = {};
	union nvmf_c2h_msg cmp_rsp = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd cmp_cmd = {};
	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	cmp_req.qpair = &qpair;
	cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
	cmp_req.rsp = &cmp_rsp;

	cmp_cmd.nsid = 1;
	cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

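	/*
	 * A fused compare-and-write arrives as two commands: a COMPARE with
	 * FUSE_FIRST and a WRITE with FUSE_SECOND.  The cases below cover a
	 * matching pair, a pair whose SLBA/NLB ranges disagree, a range past
	 * the end of the bdev, and a transport length that disagrees with NLB.
	 */
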
	/* 1. SUCCESS */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);

	/* 2. Fused command start lba / num blocks mismatch */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 2;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

static void
test_nvmf_bdev_ctrlr_identify_ns(void)
{
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_bdev bdev = {};
	uint8_t ns_g_id[16] = "abcdefgh";
	uint8_t eui64[8] = "12345678";

	ns.bdev = &bdev;
	ns.ptpl_file = (void *)0xDEADBEEF;
	memcpy(ns.opts.nguid, ns_g_id, 16);
	memcpy(ns.opts.eui64, eui64, 8);

	bdev.blockcnt = 10;
	bdev.acwu = 1;
	bdev.md_len = 512;
	bdev.dif_type = SPDK_DIF_TYPE1;
	bdev.blocklen = 4096;
	bdev.md_interleave = 0;
	bdev.optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	bdev.dif_is_head_of_md = true;

	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, false);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.flbas.msb_format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.lbaf[0].ms == 512);
	CU_ASSERT(nsdata.dps.pit == SPDK_NVME_FMT_NVM_PROTECTION_DISABLE);
	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(nsdata.flbas.extended == 1);
	CU_ASSERT(nsdata.mc.extended == 1);
	CU_ASSERT(nsdata.mc.pointer == 0);
	CU_ASSERT(nsdata.dps.md_start == true);
	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));

	memset(&nsdata, 0, sizeof(nsdata));
	nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, true);
	CU_ASSERT(nsdata.nsze == 10);
	CU_ASSERT(nsdata.ncap == 10);
	CU_ASSERT(nsdata.nuse == 10);
	CU_ASSERT(nsdata.nlbaf == 0);
	CU_ASSERT(nsdata.flbas.format == 0);
	CU_ASSERT(nsdata.flbas.msb_format == 0);
	CU_ASSERT(nsdata.nacwu == 0);
	CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
	CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(nsdata.nmic.can_share == 1);
	CU_ASSERT(nsdata.lbaf[0].ms == 0);
	CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_reg_only == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.exclusive_access_all_reg == 1);
	CU_ASSERT(nsdata.nsrescap.rescap.ignore_existing_key == 1);
	CU_ASSERT(!strncmp(nsdata.nguid, ns_g_id, 16));
	CU_ASSERT(!strncmp((uint8_t *)&nsdata.eui64, eui64, 8));
}

static void
test_nvmf_bdev_ctrlr_zcopy_start(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.blockcnt = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);

	/* 2. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 3. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_zcopy_start(&bdev, desc, &ch, &write_req);

	CU_ASSERT_EQUAL(rc, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sct, SPDK_NVME_SCT_GENERIC);
	CU_ASSERT_EQUAL(write_rsp.nvme_cpl.status.sc, SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

static void
test_nvmf_bdev_ctrlr_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_request req = {};
	struct spdk_nvmf_qpair qpair = {};
	union nvmf_h2c_msg cmd = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_scc_source_range range = {};

	req.cmd = &cmd;
	req.rsp = &rsp;
	req.qpair = &qpair;
	req.length = 4096;
	bdev.blocklen = 512;
	bdev.blockcnt = 3;
	cmd.nvme_cmd.cdw10 = 0;
	cmd.nvme_cmd.cdw12 = 2;

	/* Compare status asynchronous */
	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* SGL length invalid */
	cmd.nvme_cmd.cdw10 = 0;
	req.length = 512;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);

	/* Device error */
	req.length = 4096;
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_comparev_blocks, -1);

	rc = nvmf_bdev_ctrlr_compare_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Bdev does not support flush */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Flush error */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Flush blocks status asynchronous */
	MOCK_SET(spdk_bdev_flush_blocks, 0);

	rc = nvmf_bdev_ctrlr_flush_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

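	/* The write zeroes and copy cases below reuse the cdw10 (SLBA) and
	 * cdw12 (NLB) values left over from the compare cases above, changing
	 * only the fields each case is exercising. */
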
	/* Write zeroes blocks status asynchronous */
	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* SLBA out of range */
	cmd.nvme_cmd.cdw10 = 3;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* Write zeroes error */
	MOCK_SET(spdk_bdev_write_zeroes_blocks, -1);
	cmd.nvme_cmd.cdw10 = 0;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_write_zeroes_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	/* Copy blocks status asynchronous */
	MOCK_SET(spdk_bdev_io_type_supported, true);
	cmd.nvme_cmd.cdw10 = 1024;
	cmd.nvme_cmd.cdw11 = 0;
	cmd.nvme_cmd.cdw12 = 0;
	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	range.slba = 512;
	range.nlb = 511;
	req.data = &range;
	req.length = 32;
	spdk_iov_one(req.iov, &req.iovcnt, &range, req.length);
	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Copy command not supported */
	MOCK_SET(spdk_bdev_io_type_supported, false);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	MOCK_SET(spdk_bdev_io_type_supported, true);

	/* Unsupported number of source ranges */
	cmd.nvme_cmd.cdw12_bits.copy.nr = 1;
	req.length = 64;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED);

	cmd.nvme_cmd.cdw12_bits.copy.nr = 0;
	req.length = 32;

	/* Unsupported source range descriptor format */
	cmd.nvme_cmd.cdw12_bits.copy.df = 1;
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	cmd.nvme_cmd.cdw12_bits.copy.df = 0;

	/* Bdev copy command failed */
	MOCK_SET(spdk_bdev_copy_blocks, -1);
	memset(&rsp, 0, sizeof(rsp));

	rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);

	MOCK_CLEAR(spdk_bdev_copy_blocks);
	MOCK_CLEAR(spdk_bdev_io_type_supported);
}

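/*
 * Read and write should submit asynchronously when the transport length
 * matches the 0's-based NLB in cdw12, i.e. (cdw12 + 1) * blocklen bytes.
 * Both cases below use two 4096-byte blocks (8192 bytes).
 */
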
static void
test_nvmf_bdev_ctrlr_read_write_cmd(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	union nvmf_h2c_msg cmd = {};
	int rc;

	req.cmd = &cmd;
	req.rsp = &rsp;

	/* Read two blocks, block size 4096 */
	cmd.nvme_cmd.cdw12 = 1;
	bdev.blockcnt = 100;
	bdev.blocklen = 4096;
	req.length = 8192;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	rc = nvmf_bdev_ctrlr_read_cmd(&bdev, NULL, NULL, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

	/* Write two blocks, block size 4096 */
	cmd.nvme_cmd.cdw12 = 1;
	bdev.blockcnt = 100;
	bdev.blocklen = 4096;
	req.length = 8192;
	req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;

	rc = nvmf_bdev_ctrlr_write_cmd(&bdev, NULL, NULL, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
}

static void
test_nvmf_bdev_ctrlr_nvme_passthru(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};
	struct spdk_nvmf_qpair qpair = {};
	struct spdk_nvmf_poll_group group = {};

	struct spdk_nvmf_request req = {};
	union nvmf_c2h_msg rsp = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_bdev_io bdev_io;

	bdev.blocklen = 512;
	bdev.blockcnt = 10;

	qpair.group = &group;

	req.qpair = &qpair;
	req.cmd = (union nvmf_h2c_msg *)&cmd;
	req.rsp = &rsp;
	spdk_iov_one(req.iov, &req.iovcnt, NULL, 0);

	cmd.nsid = 1;
	cmd.opc = 0xFF;

	cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	/* NVME_IO success */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_IO fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	nvmf_bdev_ctrlr_complete_cmd(&bdev_io, false, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_IO not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_io_passthru, -ENOTSUP);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_IO no channel - queue IO */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_io_passthru, -ENOMEM);
	rc = nvmf_bdev_ctrlr_nvme_passthru_io(&bdev, desc, &ch, &req);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 1);

	MOCK_SET(spdk_bdev_nvme_io_passthru, 0);

	/* NVME_ADMIN success */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* NVME_ADMIN fail */
	memset(&rsp, 0, sizeof(rsp));
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	g_bdev_nvme_status_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
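	/* The stubbed spdk_bdev_io_get_nvme_status() reports the globals set
	 * above, so the completion below should observe the injected error. */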
	nvmf_bdev_ctrlr_complete_admin_cmd(&bdev_io, true, &req);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	reset_bdev_nvme_status();

	/* NVME_ADMIN not supported */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOTSUP);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(rsp.nvme_cpl.status.dnr == 1);

	/* NVME_ADMIN no channel - queue IO */
	memset(&rsp, 0, sizeof(rsp));
	MOCK_SET(spdk_bdev_nvme_admin_passthru, -ENOMEM);
	rc = spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(&bdev, desc, &ch, &req, NULL);
	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(group.stat.pending_bdev_io == 2);

	MOCK_SET(spdk_bdev_nvme_admin_passthru, 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_get_rw_params);
	CU_ADD_TEST(suite, test_lba_in_range);
	CU_ADD_TEST(suite, test_get_dif_ctx);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_identify_ns);
	CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_zcopy_start);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_read_write_cmd);
	CU_ADD_TEST(suite, test_nvmf_bdev_ctrlr_nvme_passthru);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}