/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

/*
 * NVMe end-to-end data protection test
 */

#include "spdk/stdinc.h"

#include "spdk/nvme.h"
#include "spdk/env.h"
#include "spdk/crc16.h"
#include "spdk/endian.h"
#include "spdk/memory.h"

#define MAX_DEVS 64

#define DATA_PATTERN 0x5A

struct dev {
	struct spdk_nvme_ctrlr *ctrlr;
	char name[SPDK_NVMF_TRADDR_MAX_LEN + 1];
};

static struct dev devs[MAX_DEVS];
static int num_devs = 0;

#define foreach_dev(iter) \
	for (iter = devs; iter - devs < num_devs; iter++)

static int io_complete_flag = 0;

struct io_request {
	void *contig;
	void *metadata;
	bool use_extended_lba;
	bool use_sgl;
	uint32_t sgl_offset;
	uint32_t buf_size;
	uint64_t lba;
	uint32_t lba_count;
	uint16_t apptag_mask;
	uint16_t apptag;
};

static void
io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	if (spdk_nvme_cpl_is_error(cpl)) {
		io_complete_flag = 2;
	} else {
		io_complete_flag = 1;
	}
}

static void
ns_data_buffer_reset(struct spdk_nvme_ns *ns, struct io_request *req, uint8_t data_pattern)
{
	uint32_t md_size, sector_size;
	uint32_t i, offset = 0;
	uint8_t *buf;

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);

	for (i = 0; i < req->lba_count; i++) {
		if (req->use_extended_lba) {
			offset = (sector_size + md_size) * i;
		} else {
			offset = sector_size * i;
		}

		buf = (uint8_t *)req->contig + offset;
		memset(buf, data_pattern, sector_size);
	}
}

static void
nvme_req_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
	struct io_request *req = (struct io_request *)cb_arg;

	req->sgl_offset = sgl_offset;
	return;
}

static int
nvme_req_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct io_request *req = (struct io_request *)cb_arg;
	void *payload;

	payload = req->contig + req->sgl_offset;
	*address = payload;

	*length = req->buf_size - req->sgl_offset;

	return 0;
}
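
/*
 * Each test-case builder below fills in an io_request and returns the number
 * of blocks to transfer, or 0 to skip the case on namespaces that do not use
 * the required format. The protection information is the 8-byte tuple defined
 * by struct spdk_nvme_protection_info (CRC-16 guard, application tag,
 * reference tag); these cases place it in the last 8 bytes of each block's
 * metadata, i.e. at the end of each extended LBA or of each separate metadata
 * element. This assumes the namespace was formatted with the protection
 * information transferred as the last 8 bytes of metadata.
 */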

/* CRC-16 guard checked for the extended LBA format */
static uint32_t
dp_guard_check_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
				 uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size, chksum_size;

	req->lba_count = 2;

	/* extended LBA format only for this test case */
	if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	chksum_size = sector_size + md_size - 8;
	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
				   SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(req->contig);

	req->lba = 0;
	req->use_extended_lba = true;
	req->use_sgl = true;
	req->buf_size = (sector_size + md_size) * req->lba_count;
	req->metadata = NULL;
	ns_data_buffer_reset(ns, req, DATA_PATTERN);
	pi = (struct spdk_nvme_protection_info *)(req->contig + chksum_size);
	/* big-endian for guard */
	to_be16(&pi->guard, spdk_crc16_t10dif(0, req->contig, chksum_size));

	pi = (struct spdk_nvme_protection_info *)(req->contig + (sector_size + md_size) * 2 - 8);
	to_be16(&pi->guard, spdk_crc16_t10dif(0, req->contig + sector_size + md_size, chksum_size));

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD;

	return req->lba_count;
}

/*
 * No protection information with PRACT set to 1;
 * both the extended LBA format and separate metadata can
 * run this test case.
 */
static uint32_t
dp_with_pract_test(struct spdk_nvme_ns *ns, struct io_request *req,
		   uint32_t *io_flags)
{
	uint32_t md_size, sector_size, data_len;

	req->lba_count = 8;
	req->use_extended_lba = spdk_nvme_ns_supports_extended_lba(ns) ? true : false;

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	if (md_size == 8) {
		/* No additional metadata buffer provided */
		data_len = sector_size * req->lba_count;
		req->use_extended_lba = false;
	} else {
		data_len = (sector_size + md_size) * req->lba_count;
	}
	req->contig = spdk_zmalloc(data_len, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				   SPDK_MALLOC_DMA);
	assert(req->contig);

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				     SPDK_MALLOC_DMA);
	assert(req->metadata);

	switch (spdk_nvme_ns_get_pi_type(ns)) {
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
		*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRACT;
		break;
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
		*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRCHK_REFTAG |
			    SPDK_NVME_IO_FLAGS_PRACT;
		break;
	default:
		*io_flags = 0;
		break;
	}

	req->lba = 0;

	return req->lba_count;
}
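
/*
 * The next two cases exercise the reference tag check (PRCHK_REFTAG) without
 * PRACT, so the host builds the protection information itself. For Type 1
 * protection the reference tag of the first block must match the lower 32 bits
 * of the starting LBA and is incremented by one for each subsequent block;
 * the builders below fill the PI fields accordingly (LBA for the first block,
 * LBA + 1 for the second).
 */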

/* Block reference tag checked for TYPE1 and TYPE2 with PRACT set to 0 */
static uint32_t
dp_without_pract_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
				   uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 2;

	switch (spdk_nvme_ns_get_pi_type(ns)) {
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
		return 0;
	default:
		break;
	}

	/* extended LBA format only for this test case */
	if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
				   SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(req->contig);

	req->lba = 0;
	req->use_extended_lba = true;
	req->metadata = NULL;
	pi = (struct spdk_nvme_protection_info *)(req->contig + sector_size + md_size - 8);
	/* big-endian for reference tag */
	to_be32(&pi->ref_tag, (uint32_t)req->lba);

	pi = (struct spdk_nvme_protection_info *)(req->contig + (sector_size + md_size) * 2 - 8);
	/* the reference tag is incremented for each subsequent logical block */
	to_be32(&pi->ref_tag, (uint32_t)(req->lba + 1));

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;

	return req->lba_count;
}

/* LBA + metadata without any data protection check bits set */
static uint32_t
dp_without_flags_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
				   uint32_t *io_flags)
{
	uint32_t md_size, sector_size;

	req->lba_count = 16;

	/* extended LBA format only for this test case */
	if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
				   SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(req->contig);

	req->lba = 0;
	req->use_extended_lba = true;
	req->metadata = NULL;
	*io_flags = 0;

	return req->lba_count;
}

/* Block reference tag checked for TYPE1 and TYPE2 with PRACT set to 0 */
static uint32_t
dp_without_pract_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
				    uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 2;

	switch (spdk_nvme_ns_get_pi_type(ns)) {
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
		return 0;
	default:
		break;
	}

	/* separate metadata payload only for this test case */
	if (spdk_nvme_ns_supports_extended_lba(ns)) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				   SPDK_MALLOC_DMA);
	assert(req->contig);

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				     SPDK_MALLOC_DMA);
	assert(req->metadata);

	req->lba = 0;
	req->use_extended_lba = false;

	/* the last 8 bytes if the metadata size is bigger than 8 */
	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
	/* big-endian for reference tag */
	to_be32(&pi->ref_tag, (uint32_t)req->lba);

	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size * 2 - 8);
	/* the reference tag is incremented for each subsequent logical block */
	to_be32(&pi->ref_tag, (uint32_t)(req->lba + 1));

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;

	return req->lba_count;
}
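
/*
 * The application tag is opaque to the controller: with PRCHK_APPTAG set it
 * simply compares the PI application tag field against the expected value,
 * using apptag_mask to select which bits participate in the comparison.
 * The case below uses a mask of 0xFFFF, so every bit must match.
 */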

/* Application tag checked with PRACT set to 0 */
static uint32_t
dp_without_pract_separate_meta_apptag_test(struct spdk_nvme_ns *ns,
		struct io_request *req,
		uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 1;

	/* separate metadata payload only for this test case */
	if (spdk_nvme_ns_supports_extended_lba(ns)) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				   SPDK_MALLOC_DMA);
	assert(req->contig);

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				     SPDK_MALLOC_DMA);
	assert(req->metadata);

	req->lba = 0;
	req->use_extended_lba = false;
	req->apptag_mask = 0xFFFF;
	req->apptag = req->lba_count;

	/* the last 8 bytes if the metadata size is bigger than 8 */
	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
	to_be16(&pi->app_tag, req->lba_count);

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_APPTAG;

	return req->lba_count;
}

/*
 * LBA + metadata without any data protection check bits set,
 * separate metadata payload for the test case.
 */
static uint32_t
dp_without_flags_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
				    uint32_t *io_flags)
{
	uint32_t md_size, sector_size;

	req->lba_count = 16;

	/* separate metadata payload only for this test case */
	if (spdk_nvme_ns_supports_extended_lba(ns)) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				   SPDK_MALLOC_DMA);
	assert(req->contig);

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				     SPDK_MALLOC_DMA);
	assert(req->metadata);

	req->lba = 0;
	req->use_extended_lba = false;
	*io_flags = 0;

	return req->lba_count;
}

typedef uint32_t (*nvme_build_io_req_fn_t)(struct spdk_nvme_ns *ns, struct io_request *req,
		uint32_t *io_flags);

static void
free_req(struct io_request *req)
{
	if (req == NULL) {
		return;
	}

	if (req->contig) {
		spdk_free(req->contig);
	}

	if (req->metadata) {
		spdk_free(req->metadata);
	}

	spdk_free(req);
}

static int
ns_data_buffer_compare(struct spdk_nvme_ns *ns, struct io_request *req, uint8_t data_pattern)
{
	uint32_t md_size, sector_size;
	uint32_t i, j, offset = 0;
	uint8_t *buf;

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);

	for (i = 0; i < req->lba_count; i++) {
		if (req->use_extended_lba) {
			offset = (sector_size + md_size) * i;
		} else {
			offset = sector_size * i;
		}

		buf = (uint8_t *)req->contig + offset;
		for (j = 0; j < sector_size; j++) {
			if (buf[j] != data_pattern) {
				return -1;
			}
		}
	}

	return 0;
}
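
/*
 * Core write/read round trip for one device and one test-case builder:
 * pick namespace 1, skip namespaces without PI support, let the builder fill
 * in the io_request and I/O flags, write the data pattern (with metadata/PI
 * as requested), read it back into a zeroed buffer, and verify the pattern.
 * Any protection information mismatch surfaces as a command error on the
 * write or read completion.
 */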
static int
write_read_e2e_dp_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, const char *test_name)
{
	int rc = 0;
	uint32_t lba_count;
	uint32_t io_flags = 0;

	struct io_request *req;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_ns_data *nsdata;

	ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
	if (!ns) {
		printf("Null namespace\n");
		return 0;
	}

	if (!(spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED)) {
		return 0;
	}

	nsdata = spdk_nvme_ns_get_data(ns);
	if (!nsdata || !spdk_nvme_ns_get_sector_size(ns)) {
		fprintf(stderr, "Empty nsdata or wrong sector size\n");
		return -EINVAL;
	}

	req = spdk_zmalloc(sizeof(*req), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(req);

	/* set up the I/O parameters */
	lba_count = build_io_fn(ns, req, &io_flags);
	if (!lba_count) {
		printf("%s: %s bypass the test case\n", dev->name, test_name);
		free_req(req);
		return 0;
	}

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
	if (!qpair) {
		free_req(req);
		return -1;
	}

	ns_data_buffer_reset(ns, req, DATA_PATTERN);
	if (req->use_extended_lba && req->use_sgl) {
		rc = spdk_nvme_ns_cmd_writev(ns, qpair, req->lba, lba_count, io_complete, req, io_flags,
					     nvme_req_reset_sgl, nvme_req_next_sge);
	} else if (req->use_extended_lba) {
		rc = spdk_nvme_ns_cmd_write(ns, qpair, req->contig, req->lba, lba_count,
					    io_complete, req, io_flags);
	} else {
		rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, req->contig, req->metadata, req->lba, lba_count,
						    io_complete, req, io_flags, req->apptag_mask, req->apptag);
	}

	if (rc != 0) {
		fprintf(stderr, "%s: %s write submit failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	io_complete_flag = 0;

	while (!io_complete_flag) {
		spdk_nvme_qpair_process_completions(qpair, 1);
	}

	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s write exec failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	/* reset completion flag */
	io_complete_flag = 0;

	ns_data_buffer_reset(ns, req, 0);
	if (req->use_extended_lba && req->use_sgl) {
		rc = spdk_nvme_ns_cmd_readv(ns, qpair, req->lba, lba_count, io_complete, req, io_flags,
					    nvme_req_reset_sgl, nvme_req_next_sge);
	} else if (req->use_extended_lba) {
		rc = spdk_nvme_ns_cmd_read(ns, qpair, req->contig, req->lba, lba_count,
					   io_complete, req, io_flags);
	} else {
		rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, req->contig, req->metadata, req->lba, lba_count,
						   io_complete, req, io_flags, req->apptag_mask, req->apptag);
	}

	if (rc != 0) {
		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	while (!io_complete_flag) {
		spdk_nvme_qpair_process_completions(qpair, 1);
	}

	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	rc = ns_data_buffer_compare(ns, req, DATA_PATTERN);
	if (rc < 0) {
		fprintf(stderr, "%s: %s write/read success, but memcmp Failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	printf("%s: %s test passed\n", dev->name, test_name);
	spdk_nvme_ctrlr_free_io_qpair(qpair);
	free_req(req);
	return 0;
}

static bool
probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	 struct spdk_nvme_ctrlr_opts *opts)
{
	printf("Attaching to %s\n", trid->traddr);

	return true;
}

static void
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	struct dev *dev;

	/* add to dev list (assumes fewer than MAX_DEVS controllers are attached) */
	dev = &devs[num_devs++];

	dev->ctrlr = ctrlr;

	snprintf(dev->name, sizeof(dev->name), "%s",
		 trid->traddr);

	printf("Attached to %s\n", dev->name);
}
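
/*
 * Program flow: initialize the SPDK environment, probe and attach all local
 * NVMe controllers, run the end-to-end data protection test matrix against
 * namespace 1 of each controller, then detach. As with other SPDK apps, the
 * devices are expected to be bound to a userspace driver beforehand (e.g. via
 * the SPDK setup script); that is an assumption about the run environment,
 * not something this program checks.
 */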
int
main(int argc, char **argv)
{
	struct dev *iter;
	int rc;
	struct spdk_env_opts opts;
	struct spdk_nvme_detach_ctx *detach_ctx = NULL;

	spdk_env_opts_init(&opts);
	opts.name = "nvme_dp";
	opts.core_mask = "0x1";
	opts.shm_id = 0;
	if (spdk_env_init(&opts) < 0) {
		fprintf(stderr, "Unable to initialize SPDK env\n");
		return 1;
	}

	printf("NVMe Write/Read with End-to-End data protection test\n");

	if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
		fprintf(stderr, "nvme_probe() failed\n");
		exit(1);
	}

	rc = 0;
	foreach_dev(iter) {
#define TEST(x) write_read_e2e_dp_tests(iter, x, #x)
		if (TEST(dp_with_pract_test)
		    || TEST(dp_guard_check_extended_lba_test)
		    || TEST(dp_without_pract_extended_lba_test)
		    || TEST(dp_without_flags_extended_lba_test)
		    || TEST(dp_without_pract_separate_meta_test)
		    || TEST(dp_without_pract_separate_meta_apptag_test)
		    || TEST(dp_without_flags_separate_meta_test)) {
#undef TEST
			rc = 1;
			printf("%s: failed End-to-End data protection tests\n", iter->name);
		}
	}

	printf("Cleaning up...\n");

	foreach_dev(iter) {
		spdk_nvme_detach_async(iter->ctrlr, &detach_ctx);
	}

	if (detach_ctx) {
		spdk_nvme_detach_poll(detach_ctx);
	}

	return rc;
}