/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2015 Intel Corporation.
 * All rights reserved.
 */

#include "spdk_internal/cunit.h"

#include "nvme/nvme_ns_cmd.c"
#include "nvme/nvme.c"

#include "common/lib/test_env.c"

#define UT_SIZE_IOMS 128u

static struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

static struct nvme_request *g_request = NULL;
static uint32_t g_ctrlr_quirks;

DEFINE_STUB_V(nvme_io_msg_ctrlr_detach, (struct spdk_nvme_ctrlr *ctrlr));

DEFINE_STUB_V(nvme_ctrlr_destruct_async,
	      (struct spdk_nvme_ctrlr *ctrlr, struct nvme_ctrlr_detach_ctx *ctx));

DEFINE_STUB(nvme_ctrlr_destruct_poll_async,
	    int,
	    (struct spdk_nvme_ctrlr *ctrlr, struct nvme_ctrlr_detach_ctx *ctx),
	    0);

DEFINE_STUB(spdk_nvme_poll_group_process_completions,
	    int64_t,
	    (struct spdk_nvme_poll_group *group, uint32_t completions_per_qpair,
	     spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb),
	    0);

DEFINE_STUB(spdk_nvme_qpair_process_completions,
	    int32_t,
	    (struct spdk_nvme_qpair *qpair, uint32_t max_completions),
	    0);

DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts,
	    union spdk_nvme_csts_register,
	    (struct spdk_nvme_ctrlr *ctrlr),
	    {});

DEFINE_STUB(spdk_pci_event_listen, int, (void), 1);

DEFINE_STUB(nvme_transport_ctrlr_destruct,
	    int,
	    (struct spdk_nvme_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvme_ctrlr_get_current_process,
	    struct spdk_nvme_ctrlr_process *,
	    (struct spdk_nvme_ctrlr *ctrlr),
	    (struct spdk_nvme_ctrlr_process *)(uintptr_t)0x1);

DEFINE_STUB(nvme_transport_ctrlr_scan_attached,
	    int,
	    (struct spdk_nvme_probe_ctx *probe_ctx),
	    0);

int
spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
{
	return -1;
}

struct nvme_request_sgl_ctx {
	struct iovec *iovs;
	uint32_t iovcnt;
	uint32_t cur_idx;
};

static void
nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
	struct nvme_request_sgl_ctx *ctx = (struct nvme_request_sgl_ctx *)cb_arg;

	ctx->cur_idx = 0;
}

static int
nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct nvme_request_sgl_ctx *ctx = (struct nvme_request_sgl_ctx *)cb_arg;

	*address = (void *)ctx->iovs[ctx->cur_idx].iov_base;
	*length = ctx->iovs[ctx->cur_idx].iov_len;

	ctx->cur_idx++;

	return 0;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return true;
}

struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	return NULL;
}

void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
	return 0;
}

int
nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
}

struct spdk_pci_addr
spdk_pci_device_get_addr(struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_addr pci_addr;

	memset(&pci_addr, 0, sizeof(pci_addr));
	return pci_addr;
}

struct spdk_pci_id
spdk_pci_device_get_id(struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_id pci_id;

	memset(&pci_id, 0xFF, sizeof(pci_id));

	return pci_id;
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, sizeof(*opts));
}

uint32_t
spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
{
	return ns->sector_size;
}

uint32_t
spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr->max_xfer_size;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	g_request = req;

	return 0;
}

void
nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
{
	return;
}

void
nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
{
	return;
}

int
nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	return 0;
}

static void
prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
		 struct spdk_nvme_qpair *qpair,
		 uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
		 uint32_t stripe_size, bool extended_lba)
{
	uint32_t num_requests = 32;
	uint32_t i;

	memset(ctrlr, 0, sizeof(*ctrlr));
	ctrlr->quirks = g_ctrlr_quirks;
	ctrlr->max_xfer_size = max_xfer_size;
	/*
	 * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
	 * so that we test the SGL splitting path.
	 */
	ctrlr->flags = 0;
	ctrlr->min_page_size = 4096;
	ctrlr->page_size = 4096;
	memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
	memset(ns, 0, sizeof(*ns));
	ns->ctrlr = ctrlr;
	ns->sector_size = sector_size;
	ns->extended_lba_size = sector_size;
	if (extended_lba) {
		ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
		ns->extended_lba_size += md_size;
	}
	ns->md_size = md_size;
	ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
	ns->sectors_per_max_io_no_md = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->sector_size;
	if (ctrlr->quirks & NVME_QUIRK_MDTS_EXCLUDE_MD) {
		ns->sectors_per_max_io = ns->sectors_per_max_io_no_md;
	}
	ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;

	memset(qpair, 0, sizeof(*qpair));
	qpair->ctrlr = ctrlr;
	qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
	SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
	STAILQ_INIT(&qpair->free_req);

	for (i = 0; i < num_requests; i++) {
		struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);

		req->qpair = qpair;
		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
	}

	g_request = NULL;
}

static void
cleanup_after_test(struct spdk_nvme_qpair *qpair)
{
	free(qpair->req_buf);
	g_ctrlr_quirks = 0;
}

static void
nvme_cmd_interpret_rw(const struct spdk_nvme_cmd *cmd,
		      uint64_t *lba, uint32_t *num_blocks)
{
	*lba = *(const uint64_t *)&cmd->cdw10;
	*num_blocks = (cmd->cdw12 & 0xFFFFu) + 1;
}
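
/*
 * Note: per the NVMe spec, CDW10/CDW11 of a read/write command hold the
 * 64-bit starting LBA and CDW12[15:0] holds the 0-based number of logical
 * blocks, which is what nvme_cmd_interpret_rw() decodes above. For example,
 * a child I/O at LBA 266 for 256 blocks carries cdw10 == 266 and
 * (cdw12 & 0xFFFF) == 255.
 */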

static void
split_test(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_qpair qpair;
	struct spdk_nvme_ctrlr ctrlr;
	void *payload;
	uint64_t lba, cmd_lba;
	uint32_t lba_count, cmd_lba_count;
	int rc;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
	payload = malloc(512);
	lba = 0;
	lba_count = 1;

	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);

	CU_ASSERT(g_request->num_children == 0);
	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(cmd_lba == lba);
	CU_ASSERT(cmd_lba_count == lba_count);

	free(payload);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}
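
/*
 * The split tests below all use a 512-byte sector size and a 128 KB max
 * transfer size, so the largest single command is 128 * 1024 / 512 = 256
 * blocks; anything larger must be split into child requests.
 */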

static void
split_test2(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	struct nvme_request *child;
	void *payload;
	uint64_t lba, cmd_lba;
	uint32_t lba_count, cmd_lba_count;
	int rc;

	/*
	 * Controller has max xfer of 128 KB (256 blocks).
	 * Submit an I/O of 256 KB starting at LBA 0, which should be split
	 * on the max I/O boundary into two I/Os of 128 KB.
	 */

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
	payload = malloc(256 * 1024);
	lba = 0;
	lba_count = (256 * 1024) / 512;

	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);

	CU_ASSERT(g_request->num_children == 2);

	child = TAILQ_FIRST(&g_request->children);
	nvme_request_remove_child(g_request, child);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 128 * 1024);
	CU_ASSERT(cmd_lba == 0);
	CU_ASSERT(cmd_lba_count == 256); /* 256 * 512 byte blocks = 128 KB */
	nvme_free_request(child);

	child = TAILQ_FIRST(&g_request->children);
	nvme_request_remove_child(g_request, child);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 128 * 1024);
	CU_ASSERT(cmd_lba == 256);
	CU_ASSERT(cmd_lba_count == 256);
	nvme_free_request(child);

	CU_ASSERT(TAILQ_EMPTY(&g_request->children));

	free(payload);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}

static void
split_test3(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	struct nvme_request *child;
	void *payload;
	uint64_t lba, cmd_lba;
	uint32_t lba_count, cmd_lba_count;
	int rc;

	/*
	 * Controller has max xfer of 128 KB (256 blocks).
	 * Submit an I/O of 256 KB starting at LBA 10, which should be split
	 * into two I/Os:
	 * 1) LBA = 10, count = 256 blocks
	 * 2) LBA = 266, count = 256 blocks
	 */

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
	payload = malloc(256 * 1024);
	lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
	lba_count = (256 * 1024) / 512;

	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);

	CU_ASSERT(g_request->num_children == 2);

	child = TAILQ_FIRST(&g_request->children);
	nvme_request_remove_child(g_request, child);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 128 * 1024);
	CU_ASSERT(cmd_lba == 10);
	CU_ASSERT(cmd_lba_count == 256);
	nvme_free_request(child);

	child = TAILQ_FIRST(&g_request->children);
	nvme_request_remove_child(g_request, child);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 128 * 1024);
	CU_ASSERT(cmd_lba == 266);
	CU_ASSERT(cmd_lba_count == 256);
	nvme_free_request(child);

	CU_ASSERT(TAILQ_EMPTY(&g_request->children));

	free(payload);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}

static void
split_test4(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	struct nvme_request *child;
	void *payload;
	uint64_t lba, cmd_lba;
	uint32_t lba_count, cmd_lba_count;
	int rc;

	/*
	 * Controller has max xfer of 128 KB (256 blocks) and a stripe size of 128 KB.
	 * (Same as split_test3 except with driver-assisted striping enabled.)
	 * Submit an I/O of 256 KB starting at LBA 10, which should be split
	 * into three I/Os:
	 * 1) LBA = 10, count = 246 blocks (less than max I/O size to align to stripe size)
	 * 2) LBA = 256, count = 256 blocks (aligned to stripe size and max I/O size)
	 * 3) LBA = 512, count = 10 blocks (finish off the remaining I/O size)
	 */

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
	payload = malloc(256 * 1024);
	lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
	lba_count = (256 * 1024) / 512;

	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
				   SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);

	CU_ASSERT(g_request->num_children == 3);

	child = TAILQ_FIRST(&g_request->children);
	nvme_request_remove_child(g_request, child);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == (256 - 10) * 512);
	CU_ASSERT(child->payload_offset == 0);
	CU_ASSERT(cmd_lba == 10);
	CU_ASSERT(cmd_lba_count == 256 - 10);
	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
	nvme_free_request(child);

	child = TAILQ_FIRST(&g_request->children);
	nvme_request_remove_child(g_request, child);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 128 * 1024);
	CU_ASSERT(child->payload_offset == (256 - 10) * 512);
	CU_ASSERT(cmd_lba == 256);
	CU_ASSERT(cmd_lba_count == 256);
	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
	nvme_free_request(child);

	child = TAILQ_FIRST(&g_request->children);
	nvme_request_remove_child(g_request, child);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 10 * 512);
	CU_ASSERT(child->payload_offset == (512 - 10) * 512);
	CU_ASSERT(cmd_lba == 512);
	CU_ASSERT(cmd_lba_count == 10);
	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
	nvme_free_request(child);

	CU_ASSERT(TAILQ_EMPTY(&g_request->children));

	free(payload);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}
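
/*
 * A note on the stripe arithmetic above (a sketch of the rule, not a quote
 * of the driver code): with striping enabled, the first child is trimmed so
 * the next child starts on a stripe boundary, e.g. 256 - (10 % 256) = 246
 * blocks here, and subsequent children proceed in whole 256-block stripes.
 */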

static void
test_cmd_child_request(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	struct nvme_request *child, *tmp;
	void *payload;
	uint64_t lba = 0x1000;
	uint32_t i = 0;
	uint32_t offset = 0;
	uint32_t sector_size = 512;
	uint32_t max_io_size = 128 * 1024;
	uint32_t sectors_per_max_io = max_io_size / sector_size;

	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_io_size, 0, false);

	payload = malloc(128 * 1024);
	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io, NULL, NULL, 0);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->payload_offset == 0);
	CU_ASSERT(g_request->num_children == 0);
	nvme_free_request(g_request);

	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io - 1, NULL, NULL, 0);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->payload_offset == 0);
	CU_ASSERT(g_request->num_children == 0);
	nvme_free_request(g_request);

	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io * 4, NULL, NULL, 0);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 4);

	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, (DEFAULT_IO_QUEUE_REQUESTS + 1) * sector_size,
				   NULL,
				   NULL, 0);
	CU_ASSERT(rc == -EINVAL);

	TAILQ_FOREACH_SAFE(child, &g_request->children, child_tailq, tmp) {
		nvme_request_remove_child(g_request, child);
		CU_ASSERT(child->payload_offset == offset);
		CU_ASSERT(child->cmd.opc == SPDK_NVME_OPC_READ);
		CU_ASSERT(child->cmd.nsid == ns.id);
		CU_ASSERT(child->cmd.cdw10 == (lba + sectors_per_max_io * i));
		CU_ASSERT(child->cmd.cdw12 == ((sectors_per_max_io - 1) | 0));
		offset += max_io_size;
		nvme_free_request(child);
		i++;
	}

	free(payload);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_flush(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	int rc;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

	rc = spdk_nvme_ns_cmd_flush(&ns, &qpair, cb_fn, cb_arg);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_write_zeroes(void)
{
	struct spdk_nvme_ns ns = { 0 };
	struct spdk_nvme_ctrlr ctrlr = {{0}};
	struct spdk_nvme_qpair qpair;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	uint64_t cmd_lba;
	uint32_t cmd_lba_count;
	int rc;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

	rc = spdk_nvme_ns_cmd_write_zeroes(&ns, &qpair, 0, 2, cb_fn, cb_arg, 0);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT_EQUAL(cmd_lba, 0);
	CU_ASSERT_EQUAL(cmd_lba_count, 2);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_write_uncorrectable(void)
{
	struct spdk_nvme_ns ns = { 0 };
	struct spdk_nvme_ctrlr ctrlr = {{0}};
	struct spdk_nvme_qpair qpair;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	uint64_t cmd_lba;
	uint32_t cmd_lba_count;
	int rc;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

	rc = spdk_nvme_ns_cmd_write_uncorrectable(&ns, &qpair, 0, 2, cb_fn, cb_arg);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_UNCORRECTABLE);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT_EQUAL(cmd_lba, 0);
	CU_ASSERT_EQUAL(cmd_lba_count, 2);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}
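
/*
 * Note: for Dataset Management, CDW10[7:0] is the 0-based number of ranges
 * (NR), so the 1-range TRIM below expects cdw10 == 0 and the 256-range TRIM
 * expects cdw10 == 255.
 */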

static void
test_nvme_ns_cmd_dataset_management(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	struct spdk_nvme_dsm_range ranges[256];
	uint16_t i;
	int rc = 0;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

	for (i = 0; i < 256; i++) {
		ranges[i].starting_lba = i;
		ranges[i].length = 1;
		ranges[i].attributes.raw = 0;
	}

	/* TRIM one LBA */
	rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
			ranges, 1, cb_fn, cb_arg);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	CU_ASSERT(g_request->cmd.cdw10 == 0);
	CU_ASSERT(g_request->cmd.cdw11_bits.dsm.ad == 1);
	spdk_free(g_request->payload.contig_or_cb_arg);
	nvme_free_request(g_request);

	/* TRIM 256 LBAs */
	rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
			ranges, 256, cb_fn, cb_arg);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	CU_ASSERT(g_request->cmd.cdw10 == 255u);
	CU_ASSERT(g_request->cmd.cdw11_bits.dsm.ad == 1);
	spdk_free(g_request->payload.contig_or_cb_arg);
	nvme_free_request(g_request);

	rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
			NULL, 0, cb_fn, cb_arg);
	CU_ASSERT(rc != 0);
	cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_copy(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	uint16_t i;
	int rc = 0;
	uint64_t cmd_dest_lba;
	uint32_t cmd_range_count;
	struct spdk_nvme_scc_source_range ranges[64];

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

	for (i = 0; i < 64; i++) {
		ranges[i].slba = i;
		ranges[i].nlb = 1;
	}

	/* COPY one LBA */
	rc = spdk_nvme_ns_cmd_copy(&ns, &qpair, ranges,
				   1, 64, cb_fn, cb_arg);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COPY);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_dest_lba, &cmd_range_count);
	CU_ASSERT_EQUAL(cmd_dest_lba, 64);
	CU_ASSERT_EQUAL(cmd_range_count, 1);
	spdk_free(g_request->payload.contig_or_cb_arg);
	nvme_free_request(g_request);

	/* COPY 64 LBAs */
	rc = spdk_nvme_ns_cmd_copy(&ns, &qpair, ranges,
				   64, 64, cb_fn, cb_arg);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COPY);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_dest_lba, &cmd_range_count);
	CU_ASSERT_EQUAL(cmd_dest_lba, 64);
	CU_ASSERT_EQUAL(cmd_range_count, 64);
	spdk_free(g_request->payload.contig_or_cb_arg);
	nvme_free_request(g_request);

	rc = spdk_nvme_ns_cmd_copy(&ns, &qpair, ranges,
				   0, 64, cb_fn, cb_arg);
	CU_ASSERT(rc != 0);
	cleanup_after_test(&qpair);
}
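
/*
 * Note: for Simple Copy, CDW10/CDW11 hold the destination starting LBA and
 * CDW12[7:0] holds the 0-based range count, which is why
 * nvme_cmd_interpret_rw() could decode those fields above (destination LBA
 * 64, 1 or 64 ranges).
 */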

static void
test_nvme_ns_cmd_readv(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	uint32_t lba_count = 256;
	uint32_t sector_size = 512;
	struct nvme_request_sgl_ctx sgl_ctx = {};
	struct iovec iov = {};

	iov.iov_base = (void *)(uintptr_t)0x10000000;
	iov.iov_len = sector_size * lba_count;
	sgl_ctx.iovs = &iov;
	sgl_ctx.iovcnt = 1;

	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
	rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sgl_ctx, 0,
				    nvme_request_reset_sgl, nvme_request_next_sge);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sgl_ctx);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, &sgl_ctx, 0, nvme_request_reset_sgl,
				    NULL);
	CU_ASSERT(rc != 0);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}
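
/*
 * Note: SGL-payload requests require both callbacks; the negative cases
 * above and below pass a NULL reset_sgl_fn or next_sge_fn and expect the
 * submission to be rejected.
 */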

/* Like test_nvme_ns_cmd_readv, but the underlying controller has SGL support. */
static void
test_nvme_ns_cmd_readv_sgl(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	uint32_t lba_count = 256;
	uint32_t sector_size = 512;
	struct nvme_request_sgl_ctx sgl_ctx = {};
	struct iovec iov[3] = {};
	struct nvme_request *child;
	uint64_t cmd_lba;
	uint32_t cmd_lba_count;

	iov[0].iov_base = (void *)(uintptr_t)0x10000000;
	iov[0].iov_len = sector_size * lba_count;
	sgl_ctx.iovs = iov;
	sgl_ctx.iovcnt = 1;

	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
	ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;

	rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, &sgl_ctx, 0, nvme_request_reset_sgl,
				    NULL);
	CU_ASSERT(rc != 0);

	rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sgl_ctx, 0,
				    nvme_request_reset_sgl, nvme_request_next_sge);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sgl_ctx);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	/* Set the controller to only support 1 sge per request. Then do a 2 sector I/O with
	 * 3 unaligned sges. This will fail! */
	ctrlr.max_sges = 1;
	lba_count = 2;
	iov[0].iov_base = (void *)(uintptr_t)0x10000000;
	iov[0].iov_len = 300;
	iov[1].iov_base = iov[0].iov_base + iov[0].iov_len;
	iov[1].iov_len = 300;
	iov[2].iov_base = iov[1].iov_base + iov[1].iov_len;
	iov[2].iov_len = (sector_size * lba_count) - iov[0].iov_len - iov[1].iov_len;
	sgl_ctx.iovs = iov;
	sgl_ctx.iovcnt = 3;

	rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sgl_ctx, 0,
				    nvme_request_reset_sgl, nvme_request_next_sge);

	CU_ASSERT(rc != 0);

	/* Let the controller support 2 sges per request and repeat. This should
	 * succeed.
	 */
	ctrlr.max_sges = 2;
	rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sgl_ctx, 0,
				    nvme_request_reset_sgl, nvme_request_next_sge);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 2);

	child = TAILQ_FIRST(&g_request->children);
	nvme_request_remove_child(g_request, child);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 512);
	CU_ASSERT(cmd_lba == 0x1000);
	CU_ASSERT(cmd_lba_count == 1);
	nvme_free_request(child);

	child = TAILQ_FIRST(&g_request->children);
	nvme_request_remove_child(g_request, child);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 512);
	CU_ASSERT(cmd_lba == 0x1001);
	CU_ASSERT(cmd_lba_count == 1);
	nvme_free_request(child);

	CU_ASSERT(TAILQ_EMPTY(&g_request->children));

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}
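
/*
 * A note on the split just verified (a sketch of the behavior the asserts
 * encode): when a request needs more SGEs than max_sges allows, the driver
 * splits it so each child fits; here the 2-sector, 3-SGE request became two
 * one-sector children at LBAs 0x1000 and 0x1001.
 */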

static void
test_nvme_ns_cmd_writev(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	uint32_t lba_count = 256;
	uint32_t sector_size = 512;
	struct nvme_request_sgl_ctx sgl_ctx = {};
	struct iovec iov[2] = {};

	iov[0].iov_base = (void *)(uintptr_t)0x10000000;
	iov[0].iov_len = sector_size * lba_count;
	sgl_ctx.iovs = iov;
	sgl_ctx.iovcnt = 1;

	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
	rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, lba_count, NULL, &sgl_ctx, 0,
				     nvme_request_reset_sgl, nvme_request_next_sge);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sgl_ctx);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	/* Test case: NULL reset_sgl callback, expect fail */
	rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, &sgl_ctx, 0,
				     NULL, nvme_request_next_sge);
	CU_ASSERT(rc == -EINVAL);

	/* PRP1 start address is page aligned while the end address is not. The NVMe
	 * driver tries to split such a request, but iov[0]'s length is not a multiple
	 * of the block size. Expect fail. */
	iov[0].iov_base = (void *)(uintptr_t)0x3E8000;
	iov[0].iov_len = 200;
	iov[1].iov_base = (void *)(uintptr_t)0x3E9000;
	iov[1].iov_len = 312;
	sgl_ctx.iovs = iov;
	sgl_ctx.iovcnt = 2;
	rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 1, NULL, &sgl_ctx, 0,
				     nvme_request_reset_sgl, nvme_request_next_sge);
	CU_ASSERT(rc == -EINVAL);

	/* PRP1 end address is page aligned while the start address is not. Expect pass. */
	iov[0].iov_base = (void *)(((uintptr_t)iov[0].iov_base) + ctrlr.page_size - iov[0].iov_len);
	rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 1, NULL, &sgl_ctx, 0,
				     nvme_request_reset_sgl, nvme_request_next_sge);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sgl_ctx);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}
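
/*
 * Assumed background for the two PRP cases above: in a PRP list, every
 * entry except the first must start on a page boundary; only the first
 * entry may carry an offset into its page. A buffer layout that violates
 * this cannot be expressed and must be rejected or split.
 */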

static void
test_nvme_ns_cmd_comparev(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	uint32_t lba_count = 256;
	uint32_t sector_size = 512;
	struct nvme_request_sgl_ctx sgl_ctx = {};
	struct iovec iov = {};

	iov.iov_base = (void *)(uintptr_t)0x10000000;
	iov.iov_len = sector_size * lba_count;
	sgl_ctx.iovs = &iov;
	sgl_ctx.iovcnt = 1;

	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
	rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, lba_count, NULL, &sgl_ctx, 0,
				       nvme_request_reset_sgl, nvme_request_next_sge);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sgl_ctx);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, 256, NULL, &sgl_ctx, 0,
				       nvme_request_reset_sgl, NULL);
	CU_ASSERT(rc != 0);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}
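
/*
 * The *_with_md tests below rely on this extended-LBA arithmetic: with a
 * 128 KB max transfer and 640-byte (512 + 128) extended blocks, at most
 * 131072 / 640 = 204 blocks fit per child (127.5 KB); with 520-byte
 * (512 + 8) blocks, at most 131072 / 520 = 252 blocks fit (127.96875 KB).
 */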

static void
test_nvme_ns_cmd_comparev_with_md(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	char *buffer = NULL;
	char *metadata = NULL;
	uint32_t block_size, md_size;
	struct nvme_request *child0, *child1;
	uint32_t lba_count = 256;
	uint32_t sector_size = 512;
	struct nvme_request_sgl_ctx sgl_ctx = {};
	struct iovec iov = {};

	iov.iov_base = (void *)(uintptr_t)0x10000000;
	iov.iov_len = sector_size * lba_count;
	sgl_ctx.iovs = &iov;
	sgl_ctx.iovcnt = 1;

	block_size = 512;
	md_size = 128;

	buffer = malloc((block_size + md_size) * 384);
	SPDK_CU_ASSERT_FATAL(buffer != NULL);
	metadata = malloc(md_size * 384);
	SPDK_CU_ASSERT_FATAL(metadata != NULL);

	/*
	 * 512 byte data + 128 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 128 KB
	 * No stripe size
	 *
	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);

	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sgl_ctx, 0,
					       nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == metadata);
	CU_ASSERT(g_request->payload_size == 256 * 512);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 128 byte metadata
	 * Extended LBA
	 * Max data transfer size 128 KB
	 * No stripe size
	 *
	 * 256 blocks * (512 + 128) bytes per block = two I/Os:
	 * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
	 * child 1: 52 blocks
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);

	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sgl_ctx, 0,
					       nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 2);
	child0 = TAILQ_FIRST(&g_request->children);

	SPDK_CU_ASSERT_FATAL(child0 != NULL);
	CU_ASSERT(child0->payload.md == NULL);
	CU_ASSERT(child0->payload_offset == 0);
	CU_ASSERT(child0->payload_size == 204 * (512 + 128));
	child1 = TAILQ_NEXT(child0, child_tailq);

	SPDK_CU_ASSERT_FATAL(child1 != NULL);
	CU_ASSERT(child1->payload.md == NULL);
	CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
	CU_ASSERT(child1->payload_size == 52 * (512 + 128));

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Extended LBA
	 * Max data transfer size 128 KB
	 * No stripe size
	 * No protection information
	 *
	 * 256 blocks * (512 + 8) bytes per block = two I/Os:
	 * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
	 * child 1: 4 blocks
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);

	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sgl_ctx, 0,
					       nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 2);
	child0 = TAILQ_FIRST(&g_request->children);

	SPDK_CU_ASSERT_FATAL(child0 != NULL);
	CU_ASSERT(child0->payload.md == NULL);
	CU_ASSERT(child0->payload_offset == 0);
	CU_ASSERT(child0->payload_size == 252 * (512 + 8));
	child1 = TAILQ_NEXT(child0, child_tailq);

	SPDK_CU_ASSERT_FATAL(child1 != NULL);
	CU_ASSERT(child1->payload.md == NULL);
	CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
	CU_ASSERT(child1->payload_size == 4 * (512 + 8));

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Extended LBA
	 * Max data transfer size 128 KB
	 * No stripe size
	 * Protection information enabled + PRACT
	 *
	 * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sgl_ctx,
					       SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == NULL);
	CU_ASSERT(g_request->payload_offset == 0);
	CU_ASSERT(g_request->payload_size == 256 * 512); /* NOTE: does not include metadata! */

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 128 KB
	 * No stripe size
	 * Protection information enabled + PRACT
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sgl_ctx,
					       SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == metadata);
	CU_ASSERT(g_request->payload_size == 256 * 512);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 128 KB
	 * No stripe size
	 * Protection information enabled + PRACT
	 *
	 * 384 blocks * 512 bytes = two I/Os:
	 * child 0: 256 blocks
	 * child 1: 128 blocks
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 384, NULL, &sgl_ctx,
					       SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 2);
	child0 = TAILQ_FIRST(&g_request->children);

	SPDK_CU_ASSERT_FATAL(child0 != NULL);
	CU_ASSERT(child0->payload_offset == 0);
	CU_ASSERT(child0->payload_size == 256 * 512);
	CU_ASSERT(child0->md_offset == 0);
	child1 = TAILQ_NEXT(child0, child_tailq);

	SPDK_CU_ASSERT_FATAL(child1 != NULL);
	CU_ASSERT(child1->payload_offset == 256 * 512);
	CU_ASSERT(child1->payload_size == 128 * 512);
	CU_ASSERT(child1->md_offset == 256 * 8);

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	free(buffer);
	free(metadata);
}
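
/*
 * Note: compare-and-write below is built from a fused pair per the NVMe
 * spec: the COMPARE carries FUSE_FIRST and the WRITE carries FUSE_SECOND,
 * and the two commands must be submitted back to back.
 */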

static void
test_nvme_ns_cmd_compare_and_write(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	uint64_t lba = 0x1000;
	uint32_t lba_count = 256;
	uint64_t cmd_lba;
	uint32_t cmd_lba_count;
	uint32_t sector_size = 512;

	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);

	rc = spdk_nvme_ns_cmd_compare(&ns, &qpair, NULL, lba, lba_count, NULL, NULL,
				      SPDK_NVME_IO_FLAGS_FUSE_FIRST);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
	CU_ASSERT(g_request->cmd.fuse == SPDK_NVME_CMD_FUSE_FIRST);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT_EQUAL(cmd_lba, lba);
	CU_ASSERT_EQUAL(cmd_lba_count, lba_count);

	nvme_free_request(g_request);

	rc = spdk_nvme_ns_cmd_write(&ns, &qpair, NULL, lba, lba_count, NULL, NULL,
				    SPDK_NVME_IO_FLAGS_FUSE_SECOND);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
	CU_ASSERT(g_request->cmd.fuse == SPDK_NVME_CMD_FUSE_SECOND);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT_EQUAL(cmd_lba, lba);
	CU_ASSERT_EQUAL(cmd_lba_count, lba_count);

	nvme_free_request(g_request);

	cleanup_after_test(&qpair);
}

static void
test_io_flags(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	void *payload;
	uint64_t lba;
	uint32_t lba_count;
	uint64_t cmd_lba;
	uint32_t cmd_lba_count;
	int rc;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
	payload = malloc(256 * 1024);
	lba = 0;
	lba_count = (4 * 1024) / 512;

	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
				   SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
	nvme_free_request(g_request);

	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
				   SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) != 0);
	nvme_free_request(g_request);

	rc = spdk_nvme_ns_cmd_write(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
				    SPDK_NVME_IO_FLAGS_VALID_MASK);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT_EQUAL(cmd_lba_count, lba_count);
	CU_ASSERT_EQUAL(cmd_lba, lba);
	CU_ASSERT_EQUAL(g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_CDW12_MASK,
			SPDK_NVME_IO_FLAGS_CDW12_MASK);
	nvme_free_request(g_request);

	rc = spdk_nvme_ns_cmd_write(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
				    ~SPDK_NVME_IO_FLAGS_VALID_MASK);
	CU_ASSERT(rc == -EINVAL);

	free(payload);
	cleanup_after_test(&qpair);
}
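
/*
 * The reservation tests below reconstruct CDW10 field by field: the action
 * code occupies bits [2:0], IEKEY (ignore existing key) is bit 3, and the
 * remaining fields (PTPL for register at bits [31:30], the reservation type
 * for acquire/release at bits [15:8]) are shifted in to match.
 */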

static void
test_nvme_ns_cmd_reservation_register(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	struct spdk_nvme_reservation_register_data *payload;
	bool ignore_key = true;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	int rc = 0;
	uint32_t tmp_cdw10;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
	payload = malloc(sizeof(struct spdk_nvme_reservation_register_data));
	SPDK_CU_ASSERT_FATAL(payload != NULL);

	rc = spdk_nvme_ns_cmd_reservation_register(&ns, &qpair, payload, ignore_key,
			SPDK_NVME_RESERVE_REGISTER_KEY,
			SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
			cb_fn, cb_arg);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REGISTER);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	tmp_cdw10 = SPDK_NVME_RESERVE_REGISTER_KEY;
	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_PTPL_NO_CHANGES << 30;

	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

	spdk_free(g_request->payload.contig_or_cb_arg);
	nvme_free_request(g_request);
	free(payload);
	cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_reservation_release(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	struct spdk_nvme_reservation_key_data *payload;
	bool ignore_key = true;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	int rc = 0;
	uint32_t tmp_cdw10;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
	payload = malloc(sizeof(struct spdk_nvme_reservation_key_data));
	SPDK_CU_ASSERT_FATAL(payload != NULL);

	rc = spdk_nvme_ns_cmd_reservation_release(&ns, &qpair, payload, ignore_key,
			SPDK_NVME_RESERVE_RELEASE,
			SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
			cb_fn, cb_arg);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_RELEASE);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	tmp_cdw10 = SPDK_NVME_RESERVE_RELEASE;
	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;

	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

	spdk_free(g_request->payload.contig_or_cb_arg);
	nvme_free_request(g_request);
	free(payload);
	cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_reservation_acquire(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	struct spdk_nvme_reservation_acquire_data *payload;
	bool ignore_key = true;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	int rc = 0;
	uint32_t tmp_cdw10;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
	payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data));
	SPDK_CU_ASSERT_FATAL(payload != NULL);

	rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, &qpair, payload, ignore_key,
			SPDK_NVME_RESERVE_ACQUIRE,
			SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
			cb_fn, cb_arg);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	tmp_cdw10 = SPDK_NVME_RESERVE_ACQUIRE;
	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;

	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

	spdk_free(g_request->payload.contig_or_cb_arg);
	nvme_free_request(g_request);
	free(payload);
	cleanup_after_test(&qpair);
}
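
/*
 * Note: for Reservation Report, CDW10 holds NUMD, the 0-based count of
 * dwords to transfer, hence the (size >> 2) - 1 expected below.
 */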

static void
test_nvme_ns_cmd_reservation_report(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	struct spdk_nvme_reservation_status_data *payload;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	int rc = 0;
	uint32_t size = sizeof(struct spdk_nvme_reservation_status_data);

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

	payload = calloc(1, size);
	SPDK_CU_ASSERT_FATAL(payload != NULL);

	rc = spdk_nvme_ns_cmd_reservation_report(&ns, &qpair, payload, size, cb_fn, cb_arg);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REPORT);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	CU_ASSERT(g_request->cmd.cdw10 == (size >> 2) - 1);

	spdk_free(g_request->payload.contig_or_cb_arg);
	nvme_free_request(g_request);
	free(payload);
	cleanup_after_test(&qpair);
}
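
/*
 * A note on NVME_QUIRK_MDTS_EXCLUDE_MD, exercised in one of the cases
 * below: prepare_for_test() then computes the split threshold from the data
 * size alone (sectors_per_max_io_no_md), so 256 extended-LBA blocks go out
 * as a single request even though its payload, data plus interleaved
 * metadata, exceeds 128 KB.
 */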

static void
test_nvme_ns_cmd_write_with_md(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	char *buffer = NULL;
	char *metadata = NULL;
	uint32_t block_size, md_size;
	struct nvme_request *child0, *child1;

	block_size = 512;
	md_size = 128;

	buffer = malloc((block_size + md_size) * 384);
	SPDK_CU_ASSERT_FATAL(buffer != NULL);
	metadata = malloc(md_size * 384);
	SPDK_CU_ASSERT_FATAL(metadata != NULL);

	/*
	 * 512 byte data + 128 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 128 KB
	 * No stripe size
	 *
	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);

	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
					    0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == metadata);
	CU_ASSERT(g_request->md_size == 256 * 128);
	CU_ASSERT(g_request->payload_size == 256 * 512);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 128 byte metadata
	 * Extended LBA
	 * Max data transfer size 128 KB
	 * No stripe size
	 *
	 * 256 blocks * (512 + 128) bytes per block = two I/Os:
	 * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
	 * child 1: 52 blocks
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);

	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
					    0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 2);
	child0 = TAILQ_FIRST(&g_request->children);

	SPDK_CU_ASSERT_FATAL(child0 != NULL);
	CU_ASSERT(child0->payload.md == NULL);
	CU_ASSERT(child0->payload_offset == 0);
	CU_ASSERT(child0->payload_size == 204 * (512 + 128));
	child1 = TAILQ_NEXT(child0, child_tailq);

	SPDK_CU_ASSERT_FATAL(child1 != NULL);
	CU_ASSERT(child1->payload.md == NULL);
	CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
	CU_ASSERT(child1->payload_size == 52 * (512 + 128));

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 128 byte metadata
	 * Extended LBA
	 * Max data transfer size 128 KB
	 * No stripe size
	 * Enable NVME_QUIRK_MDTS_EXCLUDE_MD quirk
	 *
	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
	 */
	g_ctrlr_quirks = NVME_QUIRK_MDTS_EXCLUDE_MD;
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);

	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
					    0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);
	CU_ASSERT(g_request->md_size == 256 * 128);
	CU_ASSERT(g_request->payload_size == 256 * (512 + 128));

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Extended LBA
	 * Max data transfer size 128 KB
	 * No stripe size
	 * No protection information
	 *
	 * 256 blocks * (512 + 8) bytes per block = two I/Os:
	 * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
	 * child 1: 4 blocks
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);

	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
					    0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 2);
	child0 = TAILQ_FIRST(&g_request->children);

	SPDK_CU_ASSERT_FATAL(child0 != NULL);
	CU_ASSERT(child0->payload.md == NULL);
	CU_ASSERT(child0->payload_offset == 0);
	CU_ASSERT(child0->payload_size == 252 * (512 + 8));
	child1 = TAILQ_NEXT(child0, child_tailq);

	SPDK_CU_ASSERT_FATAL(child1 != NULL);
	CU_ASSERT(child1->payload.md == NULL);
	CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
	CU_ASSERT(child1->payload_size == 4 * (512 + 8));

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
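
	/*
	 * Assumed background for the PRACT cases below: when PRACT is set and
	 * the metadata area is exactly the 8-byte protection information, the
	 * controller generates or strips the PI itself, so no metadata is
	 * transferred and the payload covers the data alone.
	 */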

	/*
	 * 512 byte data + 8 byte metadata
	 * Extended LBA
	 * Max data transfer size 128 KB
	 * No stripe size
	 * Protection information enabled + PRACT
	 *
	 * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL,
					    SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == NULL);
	CU_ASSERT(g_request->payload_offset == 0);
	CU_ASSERT(g_request->payload_size == 256 * 512); /* NOTE: does not include metadata! */

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 128 KB
	 * No stripe size
	 * Protection information enabled + PRACT
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL,
					    SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == metadata);
	CU_ASSERT(g_request->md_size == 256 * 8);
	CU_ASSERT(g_request->payload_size == 256 * 512);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 128 KB
	 * No stripe size
	 * Protection information enabled + PRACT
	 *
	 * 384 blocks * 512 bytes = two I/Os:
	 * child 0: 256 blocks
	 * child 1: 128 blocks
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384, NULL, NULL,
					    SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 2);
	child0 = TAILQ_FIRST(&g_request->children);

	SPDK_CU_ASSERT_FATAL(child0 != NULL);
	CU_ASSERT(child0->payload_offset == 0);
	CU_ASSERT(child0->payload_size == 256 * 512);
	CU_ASSERT(child0->md_offset == 0);
	CU_ASSERT(child0->md_size == 256 * 8);
	child1 = TAILQ_NEXT(child0, child_tailq);

	SPDK_CU_ASSERT_FATAL(child1 != NULL);
	CU_ASSERT(child1->payload_offset == 256 * 512);
	CU_ASSERT(child1->payload_size == 128 * 512);
	CU_ASSERT(child1->md_offset == 256 * 8);
	CU_ASSERT(child1->md_size == 128 * 8);

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	free(buffer);
	free(metadata);
}
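
/*
 * The zone append tests below size requests against max_zone_append_size
 * rather than the 256 KB max transfer size. Oversized appends fail with
 * -EINVAL instead of being split: the device assigns the write LBA for an
 * append, so child requests could not be stitched back together.
 */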

static void
test_nvme_ns_cmd_zone_append_with_md(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	char *buffer = NULL;
	char *metadata = NULL;
	uint32_t block_size, md_size;

	block_size = 512;
	md_size = 128;

	buffer = malloc((block_size + md_size) * 384);
	SPDK_CU_ASSERT_FATAL(buffer != NULL);
	metadata = malloc(md_size * 384);
	SPDK_CU_ASSERT_FATAL(metadata != NULL);

	/*
	 * 512 byte data + 128 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 256 KB
	 * Max zone append size 128 KB
	 *
	 * 256 blocks * 512 bytes per block = 128 KB I/O
	 * 128 KB I/O <= max zone append size. Test should pass.
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 256 * 1024, 0, false);
	ctrlr.max_zone_append_size = 128 * 1024;
	ctrlr.flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
	ns.csi = SPDK_NVME_CSI_ZNS;

	rc = nvme_ns_cmd_zone_append_with_md(&ns, &qpair, buffer, metadata, 0x0, 256,
					     NULL, NULL, 0, 0, 0);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == metadata);
	CU_ASSERT(g_request->md_size == 256 * 128);
	CU_ASSERT(g_request->payload_size == 256 * 512);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 128 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 256 KB
	 * Max zone append size 128 KB
	 *
	 * 512 blocks * 512 bytes per block = 256 KB I/O
	 * 256 KB I/O > max zone append size. Test should fail.
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 256 * 1024, 0, false);
	ctrlr.max_zone_append_size = 128 * 1024;
	ctrlr.flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
	ns.csi = SPDK_NVME_CSI_ZNS;

	rc = nvme_ns_cmd_zone_append_with_md(&ns, &qpair, buffer, metadata, 0x0, 512,
					     NULL, NULL, 0, 0, 0);
	CU_ASSERT(rc == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_request == NULL);

	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 128 byte metadata
	 * Extended LBA
	 * Max data transfer size 256 KB
	 * Max zone append size 128 KB
	 *
	 * 128 blocks * (512 + 128) bytes per block = 80 KB I/O
	 * 80 KB I/O <= max zone append size. Test should pass.
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 256 * 1024, 0, true);
	ctrlr.max_zone_append_size = 128 * 1024;
	ctrlr.flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
	ns.csi = SPDK_NVME_CSI_ZNS;

	rc = nvme_ns_cmd_zone_append_with_md(&ns, &qpair, buffer, NULL, 0x0, 128,
					     NULL, NULL, 0, 0, 0);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == NULL);
	CU_ASSERT(g_request->payload_offset == 0);
	CU_ASSERT(g_request->payload_size == 128 * (512 + 128));

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 128 byte metadata
	 * Extended LBA
	 * Max data transfer size 256 KB
	 * Max zone append size 128 KB
	 *
	 * 256 blocks * (512 + 128) bytes per block = 160 KB I/O
	 * 160 KB I/O > max zone append size. Test should fail.
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 256 * 1024, 0, true);
	ctrlr.max_zone_append_size = 128 * 1024;
	ctrlr.flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
	ns.csi = SPDK_NVME_CSI_ZNS;

	rc = nvme_ns_cmd_zone_append_with_md(&ns, &qpair, buffer, NULL, 0x0, 256,
					     NULL, NULL, 0, 0, 0);
	CU_ASSERT(rc == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_request == NULL);

	cleanup_after_test(&qpair);

	free(buffer);
	free(metadata);
}
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 256 * 1024, 0, true);
	ctrlr.max_zone_append_size = 128 * 1024;
	ctrlr.flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
	ns.csi = SPDK_NVME_CSI_ZNS;

	rc = nvme_ns_cmd_zone_append_with_md(&ns, &qpair, buffer, NULL, 0x0, 256,
					     NULL, NULL, 0, 0, 0);
	CU_ASSERT(rc == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_request == NULL);

	cleanup_after_test(&qpair);

	free(buffer);
	free(metadata);
}

static void
test_nvme_ns_cmd_zone_appendv_with_md(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	uint32_t lba_count;
	uint32_t sector_size = 512;
	uint32_t md_size = 128;
	char *metadata = NULL;
	struct nvme_request_sgl_ctx sgl_ctx = {};
	struct iovec iov = {};

	metadata = malloc(md_size * 384);
	SPDK_CU_ASSERT_FATAL(metadata != NULL);

	/*
	 * 512 byte data + 128 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 256 KB
	 * Max zone append size 128 KB
	 *
	 * 256 blocks * 512 bytes per block = 128 KB I/O
	 * 128 KB I/O <= max zone append size. Test should pass.
	 */
	lba_count = 256;
	iov.iov_base = (void *)(uintptr_t)0x10000000;
	iov.iov_len = sector_size * lba_count;
	sgl_ctx.iovs = &iov;
	sgl_ctx.iovcnt = 1;
	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, 256 * 1024, 0, false);
	ctrlr.max_zone_append_size = 128 * 1024;
	ctrlr.flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
	ns.csi = SPDK_NVME_CSI_ZNS;
	rc = nvme_ns_cmd_zone_appendv_with_md(&ns, &qpair, 0x0, lba_count, NULL, &sgl_ctx, 0,
					      nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == metadata);
	CU_ASSERT(g_request->md_size == lba_count * md_size);
	CU_ASSERT(g_request->payload_size == lba_count * sector_size);

	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_ZONE_APPEND);
	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sgl_ctx);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 128 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 256 KB
	 * Max zone append size 128 KB
	 *
	 * 512 blocks * 512 bytes per block = 256 KB I/O
	 * 256 KB I/O > max zone append size. Test should fail.
	 */
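	/*
	 * Editorial note: the vectored variant applies the same size gate
	 * before any SGE is consumed, so on failure no request is built and
	 * the stubbed g_request stays NULL, as asserted below.
	 */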
	lba_count = 512;
	iov.iov_len = lba_count * sector_size;
	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, 256 * 1024, 0, false);
	ctrlr.max_zone_append_size = 128 * 1024;
	ctrlr.flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
	ns.csi = SPDK_NVME_CSI_ZNS;

	rc = nvme_ns_cmd_zone_appendv_with_md(&ns, &qpair, 0x0, lba_count, NULL, &sgl_ctx, 0,
					      nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
	CU_ASSERT(rc == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_request == NULL);

	cleanup_after_test(&qpair);

	free(metadata);
}

static void
test_nvme_ns_cmd_read_with_md(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	char *buffer = NULL;
	char *metadata = NULL;
	uint32_t block_size, md_size;

	block_size = 512;
	md_size = 128;

	buffer = malloc(block_size * 256);
	SPDK_CU_ASSERT_FATAL(buffer != NULL);
	metadata = malloc(md_size * 256);
	SPDK_CU_ASSERT_FATAL(metadata != NULL);

	/*
	 * 512 byte data + 128 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 128 KB
	 * No stripe size
	 *
	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);

	rc = spdk_nvme_ns_cmd_read_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
					   0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == metadata);
	CU_ASSERT(g_request->md_size == 256 * md_size);
	CU_ASSERT(g_request->payload_size == 256 * 512);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
	free(buffer);
	free(metadata);
}

static void
test_nvme_ns_cmd_compare_with_md(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	char *buffer = NULL;
	char *metadata = NULL;
	uint32_t block_size, md_size;
	struct nvme_request *child0, *child1;

	block_size = 512;
	md_size = 128;

	buffer = malloc((block_size + md_size) * 384);
	SPDK_CU_ASSERT_FATAL(buffer != NULL);
	metadata = malloc(md_size * 384);
	SPDK_CU_ASSERT_FATAL(metadata != NULL);

	/*
	 * 512 byte data + 128 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 128 KB
	 * No stripe size
	 *
	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);

	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
					      NULL, NULL, 0, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == metadata);
	CU_ASSERT(g_request->payload_size == 256 * 512);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 128 byte metadata
	 * Extended LBA
	 * Max data transfer size 128 KB
	 * No stripe size
	 *
	 * 256 blocks * (512 + 128) bytes per block = two I/Os:
	 *   child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
	 *   child 1: 52 blocks
	 */
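	/*
	 * Editorial worked example: prepare_for_test() computes
	 * sectors_per_max_io = max_xfer_size / extended_lba_size =
	 * (128 * 1024) / (512 + 128) = 204 (integer division), which is why
	 * 256 blocks split into 204 + 52 below.
	 */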
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);

	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
					      NULL, NULL, 0, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 2);
	child0 = TAILQ_FIRST(&g_request->children);

	SPDK_CU_ASSERT_FATAL(child0 != NULL);
	CU_ASSERT(child0->payload.md == NULL);
	CU_ASSERT(child0->payload_offset == 0);
	CU_ASSERT(child0->payload_size == 204 * (512 + 128));
	child1 = TAILQ_NEXT(child0, child_tailq);

	SPDK_CU_ASSERT_FATAL(child1 != NULL);
	CU_ASSERT(child1->payload.md == NULL);
	CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
	CU_ASSERT(child1->payload_size == 52 * (512 + 128));

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Extended LBA
	 * Max data transfer size 128 KB
	 * No stripe size
	 * No protection information
	 *
	 * 256 blocks * (512 + 8) bytes per block = two I/Os:
	 *   child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
	 *   child 1: 4 blocks
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);

	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
					      NULL, NULL, 0, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 2);
	child0 = TAILQ_FIRST(&g_request->children);

	SPDK_CU_ASSERT_FATAL(child0 != NULL);
	CU_ASSERT(child0->payload.md == NULL);
	CU_ASSERT(child0->payload_offset == 0);
	CU_ASSERT(child0->payload_size == 252 * (512 + 8));
	child1 = TAILQ_NEXT(child0, child_tailq);

	SPDK_CU_ASSERT_FATAL(child1 != NULL);
	CU_ASSERT(child1->payload.md == NULL);
	CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
	CU_ASSERT(child1->payload_size == 4 * (512 + 8));

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Extended LBA
	 * Max data transfer size 128 KB
	 * No stripe size
	 * Protection information enabled + PRACT
	 *
	 * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
					      NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == NULL);
	CU_ASSERT(g_request->payload_offset == 0);
	CU_ASSERT(g_request->payload_size == 256 * 512); /* NOTE: does not include metadata! */
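	/*
	 * Editorial note: with exactly 8 bytes of metadata, the protection
	 * information fills the whole metadata field, and PRACT makes the
	 * controller generate/strip it. No metadata crosses the bus, so the
	 * transfer (and any splitting) is sized on the 512-byte data block
	 * alone, as the assertion above shows.
	 */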

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 128 KB
	 * No stripe size
	 * Protection information enabled + PRACT
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
					      NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 0);

	CU_ASSERT(g_request->payload.md == metadata);
	CU_ASSERT(g_request->payload_size == 256 * 512);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	/*
	 * 512 byte data + 8 byte metadata
	 * Separate metadata buffer
	 * Max data transfer size 128 KB
	 * No stripe size
	 * Protection information enabled + PRACT
	 *
	 * 384 blocks * 512 bytes = two I/Os:
	 *   child 0: 256 blocks
	 *   child 1: 128 blocks
	 */
	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384,
					      NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 2);
	child0 = TAILQ_FIRST(&g_request->children);

	SPDK_CU_ASSERT_FATAL(child0 != NULL);
	CU_ASSERT(child0->payload_offset == 0);
	CU_ASSERT(child0->payload_size == 256 * 512);
	CU_ASSERT(child0->md_offset == 0);
	child1 = TAILQ_NEXT(child0, child_tailq);

	SPDK_CU_ASSERT_FATAL(child1 != NULL);
	CU_ASSERT(child1->payload_offset == 256 * 512);
	CU_ASSERT(child1->payload_size == 128 * 512);
	CU_ASSERT(child1->md_offset == 256 * 8);

	nvme_request_free_children(g_request);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);

	free(buffer);
	free(metadata);
}

static void
test_nvme_ns_cmd_setup_request(void)
{
	struct spdk_nvme_ns ns = {};
	struct nvme_request req = {};

	ns.id = 1;
	ns.pi_type = SPDK_NVME_FMT_NVM_PROTECTION_TYPE1;
	ns.flags = SPDK_NVME_NS_DPS_PI_SUPPORTED;

	_nvme_ns_cmd_setup_request(&ns, &req, SPDK_NVME_OPC_READ,
				   1024, 256, SPDK_NVME_IO_FLAGS_PRACT, 1, 1, 0);
	CU_ASSERT(req.cmd.cdw10 == 1024);
	CU_ASSERT(req.cmd.opc == SPDK_NVME_OPC_READ);
	CU_ASSERT(req.cmd.nsid == 1);
	CU_ASSERT(req.cmd.cdw14 == 1024);
	CU_ASSERT(req.cmd.fuse == 0);
	CU_ASSERT(req.cmd.cdw12 == (255 | SPDK_NVME_IO_FLAGS_PRACT));
	CU_ASSERT(req.cmd.cdw15 == (1 << 16 | 1));
}

static void
test_spdk_nvme_ns_cmd_readv_with_md(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	int rc = 0;
	char *metadata = NULL;
	uint32_t lba_count = 256;
	uint32_t sector_size = 512;
	uint32_t md_size = 128;
	struct nvme_request_sgl_ctx sgl_ctx = {};
	struct iovec iov = {};

	iov.iov_base = (void *)(uintptr_t)0x10000000;
	iov.iov_len = sector_size * lba_count;
	sgl_ctx.iovs = &iov;
	sgl_ctx.iovcnt = 1;

	metadata = (void *)0xDEADBEEF;
	prepare_for_test(&ns, &ctrlr, &qpair, sector_size,
			 md_size, 128 * 1024, 0, false);
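	/*
	 * Editorial sketch (added assertion): with 512-byte sectors, no
	 * extended LBA, and a 128 KB MDTS, prepare_for_test() yields
	 * 128 * 1024 / 512 = 256 sectors per maximum I/O, so the 256-block
	 * read below needs no splitting.
	 */
	CU_ASSERT(ns.sectors_per_max_io == 256);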
	rc = spdk_nvme_ns_cmd_readv_with_md(&ns, &qpair, 0x1000, lba_count, NULL,
					    &sgl_ctx, 0, nvme_request_reset_sgl,
					    nvme_request_next_sge, metadata, 0, 0);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sgl_ctx);
	CU_ASSERT(g_request->payload.md == (void *)0xDEADBEEF);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	CU_ASSERT(g_request->payload_size == 256 * 512);
	CU_ASSERT(g_request->qpair == &qpair);
	CU_ASSERT(g_request->md_offset == 0);
	CU_ASSERT(g_request->payload_offset == 0);

	/* Empty next_sge cb. Expect fail */
	rc = spdk_nvme_ns_cmd_readv_with_md(&ns, &qpair, 0x1000, lba_count, NULL,
					    NULL, 0, nvme_request_reset_sgl, NULL,
					    metadata, 0, 0);
	CU_ASSERT(rc == -EINVAL);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}

static void
test_spdk_nvme_ns_cmd_writev_ext(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	struct spdk_nvme_ns_cmd_ext_io_opts ext_opts = {
		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
		.memory_domain_ctx = (void *)0xf00df00d,
		.metadata = (void *)0xdeadbeef,
		.apptag_mask = 0xf,
		.apptag = 0xff
	};
	int rc = 0;
	uint32_t lba_count = 256;
	uint32_t sector_size = 512;
	uint32_t md_size = 128;
	struct nvme_request_sgl_ctx sgl_ctx = {};
	struct iovec iov = {};

	iov.iov_base = (void *)(uintptr_t)0x10000000;
	iov.iov_len = sector_size * lba_count;
	sgl_ctx.iovs = &iov;
	sgl_ctx.iovcnt = 1;

	ext_opts.size = SPDK_SIZEOF(&ext_opts, cdw13);
	prepare_for_test(&ns, &ctrlr, &qpair, sector_size,
			 md_size, 128 * 1024, 0, false);

	/* Invalid io_flags. Expect fail */
	ext_opts.io_flags = 0xFFFF000F;
	rc = spdk_nvme_ns_cmd_writev_ext(&ns, &qpair, 0x1000, lba_count,
					 NULL, &sgl_ctx, nvme_request_reset_sgl,
					 nvme_request_next_sge, &ext_opts);
	CU_ASSERT(rc != 0);
	ext_opts.io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG | SPDK_NVME_IO_FLAGS_DATA_PLACEMENT_DIRECTIVE;
	ext_opts.cdw13 = (1 << 16);

	/* Empty reset_sgl cb. Expect fail */
	rc = spdk_nvme_ns_cmd_writev_ext(&ns, &qpair, 0x1000, lba_count,
					 NULL, &sgl_ctx, NULL,
					 nvme_request_next_sge, &ext_opts);
	CU_ASSERT(rc != 0);
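	/*
	 * Editorial sketch (added assertion): a rejected submission never
	 * reaches nvme_qpair_submit_request(), so the stubbed g_request is
	 * still NULL at this point. Note also that ext_opts.size was set via
	 * SPDK_SIZEOF() up to and including cdw13, telling the library which
	 * option fields this caller actually provides.
	 */
	CU_ASSERT(g_request == NULL);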
	/* Empty next_sge cb. Expect fail */
	rc = spdk_nvme_ns_cmd_writev_ext(&ns, &qpair, 0x1000, lba_count,
					 NULL, &sgl_ctx, nvme_request_reset_sgl,
					 NULL, &ext_opts);
	CU_ASSERT(rc != 0);

	/* Expect pass */
	rc = spdk_nvme_ns_cmd_writev_ext(&ns, &qpair, 0x1000, lba_count,
					 NULL, &sgl_ctx, nvme_request_reset_sgl,
					 nvme_request_next_sge, &ext_opts);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sgl_ctx);
	CU_ASSERT(g_request->payload.md == (void *)0xDEADBEEF);
	CU_ASSERT(g_request->payload.opts == &ext_opts);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_CDW12_MASK) == ext_opts.io_flags);
	CU_ASSERT(g_request->cmd.cdw13 == ext_opts.cdw13);
	CU_ASSERT(g_request->cmd.cdw15 >> 16 == ext_opts.apptag_mask);
	CU_ASSERT((g_request->cmd.cdw15 & 0xff) == ext_opts.apptag);

	CU_ASSERT(g_request->payload_size == 256 * 512);
	CU_ASSERT(g_request->qpair == &qpair);
	CU_ASSERT(g_request->md_offset == 0);
	CU_ASSERT(g_request->payload_offset == 0);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}

static void
test_spdk_nvme_ns_cmd_readv_ext(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	struct spdk_nvme_ns_cmd_ext_io_opts ext_opts = {
		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
		.memory_domain_ctx = (void *)0xf00df00d,
		.metadata = (void *)0xdeadbeef,
		.apptag_mask = 0xf,
		.apptag = 0xff
	};
	int rc = 0;
	uint32_t lba_count = 256;
	uint32_t sector_size = 512;
	uint32_t md_size = 128;
	struct nvme_request_sgl_ctx sgl_ctx = {};
	struct iovec iov = {};

	iov.iov_base = (void *)(uintptr_t)0x10000000;
	iov.iov_len = sector_size * lba_count;
	sgl_ctx.iovs = &iov;
	sgl_ctx.iovcnt = 1;

	ext_opts.size = SPDK_SIZEOF(&ext_opts, cdw13);
	prepare_for_test(&ns, &ctrlr, &qpair, sector_size,
			 md_size, 128 * 1024, 0, false);

	/* Invalid io_flags. Expect fail */
	ext_opts.io_flags = 0xFFFF000F;
	rc = spdk_nvme_ns_cmd_readv_ext(&ns, &qpair, 0x1000, lba_count,
					NULL, &sgl_ctx, nvme_request_reset_sgl,
					nvme_request_next_sge, &ext_opts);
	CU_ASSERT(rc != 0);
	ext_opts.io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;

	/* Empty reset_sgl cb. Expect fail */
	rc = spdk_nvme_ns_cmd_readv_ext(&ns, &qpair, 0x1000, lba_count,
					NULL, &sgl_ctx, NULL,
					nvme_request_next_sge, &ext_opts);
	CU_ASSERT(rc != 0);
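	/*
	 * Editorial note: per the NVMe base spec, CDW15 of a read/write packs
	 * the expected logical block application tag in bits 15:0 and its
	 * mask in bits 31:16; the cdw15 assertions in the passing case below
	 * check exactly that layout.
	 */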
	/* Empty next_sge cb. Expect fail */
	rc = spdk_nvme_ns_cmd_readv_ext(&ns, &qpair, 0x1000, lba_count,
					NULL, &sgl_ctx, nvme_request_reset_sgl,
					NULL, &ext_opts);
	CU_ASSERT(rc != 0);

	/* Expect pass */
	rc = spdk_nvme_ns_cmd_readv_ext(&ns, &qpair, 0x1000, lba_count,
					NULL, &sgl_ctx, nvme_request_reset_sgl,
					nvme_request_next_sge, &ext_opts);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sgl_ctx);
	CU_ASSERT(g_request->payload.md == (void *)0xDEADBEEF);
	CU_ASSERT(g_request->payload.opts == &ext_opts);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_CDW12_MASK) == ext_opts.io_flags);
	CU_ASSERT(g_request->cmd.cdw15 >> 16 == ext_opts.apptag_mask);
	CU_ASSERT((g_request->cmd.cdw15 & 0xff) == ext_opts.apptag);

	CU_ASSERT(g_request->payload_size == 256 * 512);
	CU_ASSERT(g_request->qpair == &qpair);
	CU_ASSERT(g_request->md_offset == 0);
	CU_ASSERT(g_request->payload_offset == 0);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_verify(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	uint64_t cmd_lba;
	uint32_t cmd_lba_count;
	int rc;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

	rc = spdk_nvme_ns_cmd_verify(&ns, &qpair, 0, 2, NULL, NULL, 0);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_VERIFY);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT_EQUAL(cmd_lba, 0);
	CU_ASSERT_EQUAL(cmd_lba_count, 2);

	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_io_mgmt_send(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	uint16_t list[UT_SIZE_IOMS];
	uint16_t i;
	uint32_t tmp_cdw10;
	int rc = 0;

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

	for (i = 0; i < UT_SIZE_IOMS; i++) {
		list[i] = i;
	}

	/*
	 * Submit an I/O management send command with a list of 128 placement
	 * identifiers. The management operation specific (MOS) field holds
	 * the number of placement identifiers, which is a 0-based value.
	 */
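	/*
	 * Editorial worked example: CDW10 packs the management operation in
	 * its low bits and the MOS in bits 31:16, so the expected value below
	 * is SPDK_NVME_FDP_IO_MGMT_SEND_RUHU | ((UT_SIZE_IOMS - 1) << 16).
	 */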
	rc = spdk_nvme_ns_cmd_io_mgmt_send(&ns, &qpair, list, UT_SIZE_IOMS * sizeof(uint16_t),
					   SPDK_NVME_FDP_IO_MGMT_SEND_RUHU, UT_SIZE_IOMS - 1, cb_fn, cb_arg);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_IO_MANAGEMENT_SEND);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	tmp_cdw10 = SPDK_NVME_FDP_IO_MGMT_SEND_RUHU;
	tmp_cdw10 |= (UT_SIZE_IOMS - 1) << 16;

	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
	spdk_free(g_request->payload.contig_or_cb_arg);
	nvme_free_request(g_request);
	cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_io_mgmt_recv(void)
{
	struct spdk_nvme_ns ns;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_qpair qpair;
	struct spdk_nvme_fdp_ruhs *payload;
	spdk_nvme_cmd_cb cb_fn = NULL;
	void *cb_arg = NULL;
	int rc = 0;
	uint16_t mos = 2;
	uint32_t tmp_cdw10;
	uint32_t size = sizeof(struct spdk_nvme_fdp_ruhs);

	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

	payload = calloc(1, size);
	SPDK_CU_ASSERT_FATAL(payload != NULL);

	rc = spdk_nvme_ns_cmd_io_mgmt_recv(&ns, &qpair, payload, size,
					   SPDK_NVME_FDP_IO_MGMT_RECV_RUHS, mos, cb_fn, cb_arg);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_IO_MANAGEMENT_RECEIVE);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	tmp_cdw10 = SPDK_NVME_FDP_IO_MGMT_RECV_RUHS;
	tmp_cdw10 |= (uint32_t)mos << 16;

	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
	/* CDW11 holds the number of dwords, which is 0-based */
	CU_ASSERT(g_request->cmd.cdw11 == (size >> 2) - 1);

	spdk_free(g_request->payload.contig_or_cb_arg);
	nvme_free_request(g_request);
	free(payload);

	/* Buffer length not a multiple of 4. Expect fail */
	rc = spdk_nvme_ns_cmd_io_mgmt_recv(&ns, &qpair, NULL, 6,
					   SPDK_NVME_FDP_IO_MGMT_RECV_RUHS, mos, cb_fn, cb_arg);
	CU_ASSERT(rc != 0);
	cleanup_after_test(&qpair);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);

	CU_ADD_TEST(suite, split_test);
	CU_ADD_TEST(suite, split_test2);
	CU_ADD_TEST(suite, split_test3);
	CU_ADD_TEST(suite, split_test4);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_flush);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_dataset_management);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_copy);
	CU_ADD_TEST(suite, test_io_flags);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_write_zeroes);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_write_uncorrectable);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_register);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_release);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_acquire);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_report);
	CU_ADD_TEST(suite, test_cmd_child_request);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_readv);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_readv_sgl);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_read_with_md);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_writev);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_write_with_md);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_zone_append_with_md);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_zone_appendv_with_md);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_comparev);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_compare_and_write);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_compare_with_md);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_comparev_with_md);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_setup_request);
	CU_ADD_TEST(suite, test_spdk_nvme_ns_cmd_readv_with_md);
	CU_ADD_TEST(suite, test_spdk_nvme_ns_cmd_writev_ext);
	CU_ADD_TEST(suite, test_spdk_nvme_ns_cmd_readv_ext);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_verify);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_io_mgmt_send);
	CU_ADD_TEST(suite, test_nvme_ns_cmd_io_mgmt_recv);

	g_spdk_nvme_driver = &_g_nvme_driver;

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}