/*-
 * BSD LICENSE
 *
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/dif.h"
#include "spdk/crc16.h"
#include "spdk/crc32.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/util.h"

/* Context to iterate or create an iovec array.
 * A single sgl is either iterated or created at a time.
 */
struct _dif_sgl {
	/* Current iovec in the iteration or creation */
	struct iovec *iov;

	/* Remaining count of iovecs in the iteration or creation. */
	int iovcnt;

	/* Current offset in the iovec */
	uint32_t iov_offset;

	/* Size of the created iovec array in bytes */
	uint32_t total_size;
};

static inline void
_dif_sgl_init(struct _dif_sgl *s, struct iovec *iovs, int iovcnt)
{
	s->iov = iovs;
	s->iovcnt = iovcnt;
	s->iov_offset = 0;
	s->total_size = 0;
}

static void
_dif_sgl_advance(struct _dif_sgl *s, uint32_t step)
{
	s->iov_offset += step;
	while (s->iovcnt != 0) {
		if (s->iov_offset < s->iov->iov_len) {
			break;
		}

		s->iov_offset -= s->iov->iov_len;
		s->iov++;
		s->iovcnt--;
	}
}

static inline void
_dif_sgl_get_buf(struct _dif_sgl *s, void **_buf, uint32_t *_buf_len)
{
	if (_buf != NULL) {
		*_buf = s->iov->iov_base + s->iov_offset;
	}
	if (_buf_len != NULL) {
		*_buf_len = s->iov->iov_len - s->iov_offset;
	}
}

static inline bool
_dif_sgl_append(struct _dif_sgl *s, uint8_t *data, uint32_t data_len)
{
	assert(s->iovcnt > 0);
	s->iov->iov_base = data;
	s->iov->iov_len = data_len;
	s->total_size += data_len;
	s->iov++;
	s->iovcnt--;

	if (s->iovcnt > 0) {
		return true;
	} else {
		return false;
	}
}

static inline bool
_dif_sgl_append_split(struct _dif_sgl *dst, struct _dif_sgl *src, uint32_t data_len)
{
	uint8_t *buf;
	uint32_t buf_len;

	while (data_len != 0) {
		_dif_sgl_get_buf(src, (void *)&buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);

		if (!_dif_sgl_append(dst, buf, buf_len)) {
			return false;
		}

		_dif_sgl_advance(src, buf_len);
		data_len -= buf_len;
	}

	return true;
}

/* This function must be used before starting iteration. */
static bool
_dif_sgl_is_bytes_multiple(struct _dif_sgl *s, uint32_t bytes)
{
	int i;

	for (i = 0; i < s->iovcnt; i++) {
		if (s->iov[i].iov_len % bytes) {
			return false;
		}
	}

	return true;
}

static bool
_dif_sgl_is_valid_block_aligned(struct _dif_sgl *s, uint32_t num_blocks, uint32_t block_size)
{
	uint32_t count = 0;
	int i;

	for (i = 0; i < s->iovcnt; i++) {
		if (s->iov[i].iov_len % block_size) {
			return false;
		}
		count += s->iov[i].iov_len / block_size;
	}

	return count >= num_blocks;
}

/* This function must be used before starting iteration. */
static bool
_dif_sgl_is_valid(struct _dif_sgl *s, uint32_t bytes)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < s->iovcnt; i++) {
		total += s->iov[i].iov_len;
	}

	return total >= bytes;
}

static void
_dif_sgl_copy(struct _dif_sgl *to, struct _dif_sgl *from)
{
	memcpy(to, from, sizeof(struct _dif_sgl));
}
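
/* Illustrative sketch (not part of the original source): how the _dif_sgl
 * helpers above are typically combined to walk an iovec array in fixed-size
 * steps. The buffer pointers and sizes are arbitrary example values.
 *
 *	struct iovec iovs[2] = {
 *		{.iov_base = buf0, .iov_len = 1024},
 *		{.iov_base = buf1, .iov_len = 1024},
 *	};
 *	struct _dif_sgl sgl;
 *	void *buf;
 *	uint32_t buf_len;
 *
 *	_dif_sgl_init(&sgl, iovs, 2);
 *	while (sgl.iovcnt != 0) {
 *		_dif_sgl_get_buf(&sgl, &buf, &buf_len);
 *		buf_len = spdk_min(buf_len, 512);
 *		_dif_sgl_advance(&sgl, buf_len);
 *	}
 */
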
static bool
_dif_type_is_valid(enum spdk_dif_type dif_type, uint32_t dif_flags)
{
	switch (dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
	case SPDK_DIF_DISABLE:
		break;
	case SPDK_DIF_TYPE3:
		if (dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
			SPDK_ERRLOG("Reference Tag should not be checked for Type 3\n");
			return false;
		}
		break;
	default:
		SPDK_ERRLOG("Unknown DIF Type: %d\n", dif_type);
		return false;
	}

	return true;
}

static bool
_dif_is_disabled(enum spdk_dif_type dif_type)
{
	if (dif_type == SPDK_DIF_DISABLE) {
		return true;
	} else {
		return false;
	}
}

static uint32_t
_get_guard_interval(uint32_t block_size, uint32_t md_size, bool dif_loc, bool md_interleave)
{
	if (!dif_loc) {
		/* For metadata formats with more than 8 bytes, if the DIF is
		 * contained in the last 8 bytes of metadata, then the CRC
		 * covers all metadata up to but excluding these last 8 bytes.
		 */
		if (md_interleave) {
			return block_size - sizeof(struct spdk_dif);
		} else {
			return md_size - sizeof(struct spdk_dif);
		}
	} else {
		/* For metadata formats with more than 8 bytes, if the DIF is
		 * contained in the first 8 bytes of metadata, then the CRC
		 * does not cover any metadata.
		 */
		if (md_interleave) {
			return block_size - md_size;
		} else {
			return 0;
		}
	}
}

int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint16_t guard_seed)
{
	uint32_t data_block_size;

	if (md_size < sizeof(struct spdk_dif)) {
		SPDK_ERRLOG("Metadata size is smaller than DIF size.\n");
		return -EINVAL;
	}

	if (md_interleave) {
		if (block_size < md_size) {
			SPDK_ERRLOG("Block size is smaller than metadata size.\n");
			return -EINVAL;
		}
		data_block_size = block_size - md_size;
	} else {
		if (block_size == 0 || (block_size % 512) != 0) {
			SPDK_ERRLOG("Block size must be a non-zero multiple of 512.\n");
			return -EINVAL;
		}
		data_block_size = block_size;
	}

	if (!_dif_type_is_valid(dif_type, dif_flags)) {
		SPDK_ERRLOG("DIF type is invalid.\n");
		return -EINVAL;
	}

	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->md_interleave = md_interleave;
	ctx->guard_interval = _get_guard_interval(block_size, md_size, dif_loc, md_interleave);
	ctx->dif_type = dif_type;
	ctx->dif_flags = dif_flags;
	ctx->init_ref_tag = init_ref_tag;
	ctx->apptag_mask = apptag_mask;
	ctx->app_tag = app_tag;
	ctx->data_offset = data_offset;
	ctx->ref_tag_offset = data_offset / data_block_size;
	ctx->last_guard = guard_seed;
	ctx->guard_seed = guard_seed;
	ctx->remapped_init_ref_tag = 0;

	return 0;
}
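
/* Illustrative sketch (not part of the original source): initializing a DIF
 * context for 512-byte data blocks with 8 bytes of interleaved metadata
 * (520-byte extended blocks), DIF placed in the first 8 metadata bytes,
 * Type 1, all checks enabled. The tag values are arbitrary example values.
 *
 *	struct spdk_dif_ctx ctx;
 *	uint32_t dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK |
 *			     SPDK_DIF_FLAGS_APPTAG_CHECK |
 *			     SPDK_DIF_FLAGS_REFTAG_CHECK;
 *	int rc;
 *
 *	rc = spdk_dif_ctx_init(&ctx, 520, 8, true, true, SPDK_DIF_TYPE1,
 *			       dif_flags, 0x100, 0xFFFF, 0x1234, 0, 0);
 *	if (rc != 0) {
 *		return rc;
 *	}
 */
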
void
spdk_dif_ctx_set_data_offset(struct spdk_dif_ctx *ctx, uint32_t data_offset)
{
	uint32_t data_block_size;

	if (ctx->md_interleave) {
		data_block_size = ctx->block_size - ctx->md_size;
	} else {
		data_block_size = ctx->block_size;
	}

	ctx->data_offset = data_offset;
	ctx->ref_tag_offset = data_offset / data_block_size;
}

void
spdk_dif_ctx_set_remapped_init_ref_tag(struct spdk_dif_ctx *ctx,
				       uint32_t remapped_init_ref_tag)
{
	ctx->remapped_init_ref_tag = remapped_init_ref_tag;
}

static void
_dif_generate(void *_dif, uint16_t guard, uint32_t offset_blocks,
	      const struct spdk_dif_ctx *ctx)
{
	struct spdk_dif *dif = _dif;
	uint32_t ref_tag;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		to_be16(&dif->guard, guard);
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
		to_be16(&dif->app_tag, ctx->app_tag);
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
		/* For type 1 and 2, the reference tag is incremented for each
		 * subsequent logical block. For type 3, the reference tag
		 * remains the same as the initial reference tag.
		 */
		if (ctx->dif_type != SPDK_DIF_TYPE3) {
			ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		} else {
			ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset;
		}

		to_be32(&dif->ref_tag, ref_tag);
	}
}

static void
dif_generate(struct _dif_sgl *sgl, uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks = 0;
	void *buf;
	uint16_t guard = 0;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(sgl, &buf, NULL);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(ctx->guard_seed, buf, ctx->guard_interval);
		}

		_dif_generate(buf + ctx->guard_interval, guard, offset_blocks, ctx);

		_dif_sgl_advance(sgl, ctx->block_size);
		offset_blocks++;
	}
}

static uint16_t
_dif_generate_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
		    uint16_t guard, uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_in_dif, buf_len;
	void *buf;
	struct spdk_dif dif = {};

	assert(offset_in_block < ctx->guard_interval);
	assert(offset_in_block + data_len < ctx->guard_interval ||
	       offset_in_block + data_len == ctx->block_size);

	/* Compute CRC over split logical block data. */
	while (data_len != 0 && offset_in_block < ctx->guard_interval) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);
		buf_len = spdk_min(buf_len, ctx->guard_interval - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(guard, buf, buf_len);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
		data_len -= buf_len;
	}

	if (offset_in_block < ctx->guard_interval) {
		return guard;
	}

	/* Once the data of a whole logical block has been processed, generate
	 * the DIF and save it to the temporary DIF area.
	 */
	_dif_generate(&dif, guard, offset_blocks, ctx);

	/* Copy the generated DIF field to the split DIF field, and then
	 * skip the metadata field after the DIF field (if any).
	 */
	while (offset_in_block < ctx->block_size) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);

		if (offset_in_block < ctx->guard_interval + sizeof(struct spdk_dif)) {
			offset_in_dif = offset_in_block - ctx->guard_interval;
			buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset_in_dif);

			memcpy(buf, ((uint8_t *)&dif) + offset_in_dif, buf_len);
		} else {
			buf_len = spdk_min(buf_len, ctx->block_size - offset_in_block);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	return guard;
}

static void
dif_generate_split(struct _dif_sgl *sgl, uint32_t num_blocks,
		   const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;
	uint16_t guard = 0;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_generate_split(sgl, 0, ctx->block_size, guard, offset_blocks, ctx);
	}
}

int
spdk_dif_generate(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		  const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl sgl;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
		dif_generate(&sgl, num_blocks, ctx);
	} else {
		dif_generate_split(&sgl, num_blocks, ctx);
	}

	return 0;
}
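
/* Illustrative sketch (not part of the original source): generating DIF for
 * four extended blocks held in a single buffer, using a context set up as in
 * the spdk_dif_ctx_init() example above (520-byte extended blocks).
 *
 *	struct iovec iov = {.iov_base = buf, .iov_len = 4 * 520};
 *	int rc;
 *
 *	rc = spdk_dif_generate(&iov, 1, 4, &ctx);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("DIF generation failed\n");
 *	}
 */
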
static void
_dif_error_set(struct spdk_dif_error *err_blk, uint8_t err_type,
	       uint32_t expected, uint32_t actual, uint32_t err_offset)
{
	if (err_blk) {
		err_blk->err_type = err_type;
		err_blk->expected = expected;
		err_blk->actual = actual;
		err_blk->err_offset = err_offset;
	}
}

static int
_dif_verify(void *_dif, uint16_t guard, uint32_t offset_blocks,
	    const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct spdk_dif *dif = _dif;
	uint16_t _guard;
	uint16_t _app_tag;
	uint32_t ref_tag, _ref_tag;

	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* If Type 1 or 2 is used, then all DIF checks are disabled when
		 * the Application Tag is 0xFFFF.
		 */
		if (dif->app_tag == 0xFFFF) {
			return 0;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* If Type 3 is used, then all DIF checks are disabled when the
		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF.
		 */
		if (dif->app_tag == 0xFFFF && dif->ref_tag == 0xFFFFFFFF) {
			return 0;
		}
		break;
	default:
		break;
	}

	/* For type 1 and 2, the reference tag is incremented for each
	 * subsequent logical block. For type 3, the reference tag
	 * remains the same as the initial reference tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		/* Compare the DIF Guard field to the CRC computed over the logical
		 * block data.
		 */
		_guard = from_be16(&dif->guard);
		if (_guard != guard) {
			_dif_error_set(err_blk, SPDK_DIF_GUARD_ERROR, _guard, guard,
				       offset_blocks);
			SPDK_ERRLOG("Failed to compare Guard: LBA=%" PRIu32 "," \
				    " Expected=%x, Actual=%x\n",
				    ref_tag, _guard, guard);
			return -1;
		}
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
		/* Compare unmasked bits in the DIF Application Tag field to the
		 * passed Application Tag.
		 */
		_app_tag = from_be16(&dif->app_tag);
		if ((_app_tag & ctx->apptag_mask) != ctx->app_tag) {
			_dif_error_set(err_blk, SPDK_DIF_APPTAG_ERROR, ctx->app_tag,
				       (_app_tag & ctx->apptag_mask), offset_blocks);
			SPDK_ERRLOG("Failed to compare App Tag: LBA=%" PRIu32 "," \
				    " Expected=%x, Actual=%x\n",
				    ref_tag, ctx->app_tag, (_app_tag & ctx->apptag_mask));
			return -1;
		}
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
		switch (ctx->dif_type) {
		case SPDK_DIF_TYPE1:
		case SPDK_DIF_TYPE2:
			/* Compare the DIF Reference Tag field to the passed Reference Tag.
			 * The passed Reference Tag will be the least significant 4 bytes
			 * of the LBA when Type 1 is used, and an application-specific value
			 * if Type 2 is used.
			 */
			_ref_tag = from_be32(&dif->ref_tag);
			if (_ref_tag != ref_tag) {
				_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, ref_tag,
					       _ref_tag, offset_blocks);
				SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu32 "," \
					    " Expected=%x, Actual=%x\n",
					    ref_tag, ref_tag, _ref_tag);
				return -1;
			}
			break;
		case SPDK_DIF_TYPE3:
			/* For Type 3, the computed Reference Tag remains unchanged.
			 * Hence ignore the Reference Tag field.
			 */
			break;
		default:
			break;
		}
	}

	return 0;
}

static int
dif_verify(struct _dif_sgl *sgl, uint32_t num_blocks,
	   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks = 0;
	int rc;
	void *buf;
	uint16_t guard = 0;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(sgl, &buf, NULL);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(ctx->guard_seed, buf, ctx->guard_interval);
		}

		rc = _dif_verify(buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_dif_sgl_advance(sgl, ctx->block_size);
		offset_blocks++;
	}

	return 0;
}

static int
_dif_verify_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
		  uint16_t *_guard, uint32_t offset_blocks,
		  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset_in_dif, buf_len;
	void *buf;
	uint16_t guard;
	struct spdk_dif dif = {};
	int rc;

	assert(_guard != NULL);
	assert(offset_in_block < ctx->guard_interval);
	assert(offset_in_block + data_len < ctx->guard_interval ||
	       offset_in_block + data_len == ctx->block_size);

	guard = *_guard;

	/* Compute CRC over split logical block data. */
	while (data_len != 0 && offset_in_block < ctx->guard_interval) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);
		buf_len = spdk_min(buf_len, ctx->guard_interval - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(guard, buf, buf_len);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
		data_len -= buf_len;
	}

	if (offset_in_block < ctx->guard_interval) {
		*_guard = guard;
		return 0;
	}

	/* Copy the split DIF field to the temporary DIF buffer, and then
	 * skip the metadata field after the DIF field (if any).
	 */
	while (offset_in_block < ctx->block_size) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);

		if (offset_in_block < ctx->guard_interval + sizeof(struct spdk_dif)) {
			offset_in_dif = offset_in_block - ctx->guard_interval;
			buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset_in_dif);

			memcpy((uint8_t *)&dif + offset_in_dif, buf, buf_len);
		} else {
			buf_len = spdk_min(buf_len, ctx->block_size - offset_in_block);
		}
		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
	}

	rc = _dif_verify(&dif, guard, offset_blocks, ctx, err_blk);
	if (rc != 0) {
		return rc;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	*_guard = guard;
	return 0;
}

static int
dif_verify_split(struct _dif_sgl *sgl, uint32_t num_blocks,
		 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	uint16_t guard = 0;
	int rc;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_verify_split(sgl, 0, ctx->block_size, &guard, offset_blocks,
				       ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

int
spdk_dif_verify(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct _dif_sgl sgl;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
		return dif_verify(&sgl, num_blocks, ctx, err_blk);
	} else {
		return dif_verify_split(&sgl, num_blocks, ctx, err_blk);
	}
}
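
/* Illustrative sketch (not part of the original source): verifying the same
 * four extended blocks and reporting the first corrupted block, if any.
 *
 *	struct spdk_dif_error err_blk;
 *	int rc;
 *
 *	rc = spdk_dif_verify(&iov, 1, 4, &ctx, &err_blk);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("DIF error type %u at block %" PRIu32 "\n",
 *			    err_blk.err_type, err_blk.err_offset);
 *	}
 */
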
static uint32_t
dif_update_crc32c(struct _dif_sgl *sgl, uint32_t num_blocks,
		  uint32_t crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;
	void *buf;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_sgl_get_buf(sgl, &buf, NULL);

		crc32c = spdk_crc32c_update(buf, ctx->block_size - ctx->md_size, crc32c);

		_dif_sgl_advance(sgl, ctx->block_size);
	}

	return crc32c;
}

static uint32_t
_dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
			 uint32_t crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, buf_len;
	void *buf;

	data_block_size = ctx->block_size - ctx->md_size;

	assert(offset_in_block + data_len <= ctx->block_size);

	while (data_len != 0) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);

		if (offset_in_block < data_block_size) {
			buf_len = spdk_min(buf_len, data_block_size - offset_in_block);
			crc32c = spdk_crc32c_update(buf, buf_len, crc32c);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
		data_len -= buf_len;
	}

	return crc32c;
}

static uint32_t
dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t num_blocks,
			uint32_t crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		crc32c = _dif_update_crc32c_split(sgl, 0, ctx->block_size, crc32c, ctx);
	}

	return crc32c;
}

int
spdk_dif_update_crc32c(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		       uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl sgl;

	if (_crc32c == NULL) {
		return -EINVAL;
	}

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
		*_crc32c = dif_update_crc32c(&sgl, num_blocks, *_crc32c, ctx);
	} else {
		*_crc32c = dif_update_crc32c_split(&sgl, num_blocks, *_crc32c, ctx);
	}

	return 0;
}
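
/* Illustrative sketch (not part of the original source): accumulating a
 * CRC-32C over only the data portion of four extended blocks. Starting from
 * ~0 and inverting the final value is one common convention, assumed here
 * rather than required by the API.
 *
 *	uint32_t crc32c = 0xFFFFFFFF;
 *	int rc;
 *
 *	rc = spdk_dif_update_crc32c(&iov, 1, 4, &crc32c, &ctx);
 *	if (rc == 0) {
 *		crc32c ^= 0xFFFFFFFF;
 *	}
 */
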
static void
dif_generate_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks = 0, data_block_size;
	void *src, *dst;
	uint16_t guard;

	data_block_size = ctx->block_size - ctx->md_size;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(src_sgl, &src, NULL);
		_dif_sgl_get_buf(dst_sgl, &dst, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif_copy(ctx->guard_seed, dst, src, data_block_size);
			guard = spdk_crc16_t10dif(guard, dst + data_block_size,
						  ctx->guard_interval - data_block_size);
		} else {
			memcpy(dst, src, data_block_size);
		}

		_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);

		_dif_sgl_advance(src_sgl, data_block_size);
		_dif_sgl_advance(dst_sgl, ctx->block_size);
		offset_blocks++;
	}
}

static void
_dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
			 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_in_block, src_len, data_block_size;
	uint16_t guard = 0;
	void *src, *dst;

	_dif_sgl_get_buf(dst_sgl, &dst, NULL);

	data_block_size = ctx->block_size - ctx->md_size;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < data_block_size) {
		/* Compute CRC over split logical block data and copy
		 * data to the bounce buffer.
		 */
		_dif_sgl_get_buf(src_sgl, &src, &src_len);
		src_len = spdk_min(src_len, data_block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif_copy(guard, dst + offset_in_block,
						       src, src_len);
		} else {
			memcpy(dst + offset_in_block, src, src_len);
		}

		_dif_sgl_advance(src_sgl, src_len);
		offset_in_block += src_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = spdk_crc16_t10dif(guard, dst + data_block_size,
					  ctx->guard_interval - data_block_size);
	}

	_dif_sgl_advance(dst_sgl, ctx->block_size);

	_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);
}

static void
dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
			uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_generate_copy_split(src_sgl, dst_sgl, offset_blocks, ctx);
	}
}

int
spdk_dif_generate_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs,
		       int bounce_iovcnt, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl src_sgl, dst_sgl;
	uint32_t data_block_size;

	_dif_sgl_init(&src_sgl, iovs, iovcnt);
	_dif_sgl_init(&dst_sgl, bounce_iovs, bounce_iovcnt);

	data_block_size = ctx->block_size - ctx->md_size;

	if (!_dif_sgl_is_valid(&src_sgl, data_block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (!_dif_sgl_is_valid_block_aligned(&dst_sgl, num_blocks, ctx->block_size)) {
		SPDK_ERRLOG("Size of bounce_iovs array is not valid or it is not aligned to block_size.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&src_sgl, data_block_size)) {
		dif_generate_copy(&src_sgl, &dst_sgl, num_blocks, ctx);
	} else {
		dif_generate_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx);
	}

	return 0;
}

static int
dif_verify_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks = 0, data_block_size;
	void *src, *dst;
	int rc;
	uint16_t guard;

	data_block_size = ctx->block_size - ctx->md_size;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(src_sgl, &src, NULL);
		_dif_sgl_get_buf(dst_sgl, &dst, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif_copy(ctx->guard_seed, dst, src, data_block_size);
			guard = spdk_crc16_t10dif(guard, src + data_block_size,
						  ctx->guard_interval - data_block_size);
		} else {
			memcpy(dst, src, data_block_size);
		}

		rc = _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_dif_sgl_advance(src_sgl, ctx->block_size);
		_dif_sgl_advance(dst_sgl, data_block_size);
		offset_blocks++;
	}

	return 0;
}

static int
_dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		       uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk)
{
	uint32_t offset_in_block, dst_len, data_block_size;
	uint16_t guard = 0;
	void *src, *dst;

	_dif_sgl_get_buf(src_sgl, &src, NULL);

	data_block_size = ctx->block_size - ctx->md_size;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < data_block_size) {
		/* Compute CRC over split logical block data and copy
		 * data from the bounce buffer to the split data buffer.
		 */
		_dif_sgl_get_buf(dst_sgl, &dst, &dst_len);
		dst_len = spdk_min(dst_len, data_block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif_copy(guard, dst,
						       src + offset_in_block, dst_len);
		} else {
			memcpy(dst, src + offset_in_block, dst_len);
		}

		_dif_sgl_advance(dst_sgl, dst_len);
		offset_in_block += dst_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = spdk_crc16_t10dif(guard, src + data_block_size,
					  ctx->guard_interval - data_block_size);
	}

	_dif_sgl_advance(src_sgl, ctx->block_size);

	return _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
}

static int
dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		      struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	int rc;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_verify_copy_split(src_sgl, dst_sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

int
spdk_dif_verify_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs,
		     int bounce_iovcnt, uint32_t num_blocks,
		     const struct spdk_dif_ctx *ctx,
		     struct spdk_dif_error *err_blk)
{
	struct _dif_sgl src_sgl, dst_sgl;
	uint32_t data_block_size;

	_dif_sgl_init(&src_sgl, bounce_iovs, bounce_iovcnt);
	_dif_sgl_init(&dst_sgl, iovs, iovcnt);

	data_block_size = ctx->block_size - ctx->md_size;

	if (!_dif_sgl_is_valid(&dst_sgl, data_block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (!_dif_sgl_is_valid_block_aligned(&src_sgl, num_blocks, ctx->block_size)) {
		SPDK_ERRLOG("Size of bounce_iovs array is not valid or it is not aligned to block_size.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&dst_sgl, data_block_size)) {
		return dif_verify_copy(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk);
	} else {
		return dif_verify_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk);
	}
}
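
/* Illustrative sketch (not part of the original source): the copy variants
 * move data between application iovecs holding only data blocks and a bounce
 * buffer holding extended blocks. For a write, spdk_dif_generate_copy()
 * copies 512-byte data blocks into 520-byte extended blocks and appends DIF;
 * for a read, spdk_dif_verify_copy() checks DIF and strips the metadata.
 * Buffer sizes are example values matching the 520/8 context above.
 *
 *	struct iovec data_iov = {.iov_base = data_buf, .iov_len = 4 * 512};
 *	struct iovec bounce_iov = {.iov_base = bounce_buf, .iov_len = 4 * 520};
 *	int rc;
 *
 *	rc = spdk_dif_generate_copy(&data_iov, 1, &bounce_iov, 1, 4, &ctx);
 *	if (rc == 0) {
 *		rc = spdk_dif_verify_copy(&data_iov, 1, &bounce_iov, 1, 4,
 *					  &ctx, &err_blk);
 *	}
 */
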
static void
_bit_flip(uint8_t *buf, uint32_t flip_bit)
{
	uint8_t byte;

	byte = *buf;
	byte ^= 1 << flip_bit;
	*buf = byte;
}

static int
_dif_inject_error(struct _dif_sgl *sgl,
		  uint32_t block_size, uint32_t num_blocks,
		  uint32_t inject_offset_blocks,
		  uint32_t inject_offset_bytes,
		  uint32_t inject_offset_bits)
{
	uint32_t offset_in_block, buf_len;
	void *buf;

	_dif_sgl_advance(sgl, block_size * inject_offset_blocks);

	offset_in_block = 0;

	while (offset_in_block < block_size) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, block_size - offset_in_block);

		if (inject_offset_bytes >= offset_in_block &&
		    inject_offset_bytes < offset_in_block + buf_len) {
			buf += inject_offset_bytes - offset_in_block;
			_bit_flip(buf, inject_offset_bits);
			return 0;
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
	}

	return -1;
}

static int
dif_inject_error(struct _dif_sgl *sgl, uint32_t block_size, uint32_t num_blocks,
		 uint32_t start_inject_bytes, uint32_t inject_range_bytes,
		 uint32_t *inject_offset)
{
	uint32_t inject_offset_blocks, inject_offset_bytes, inject_offset_bits;
	uint32_t offset_blocks;
	int rc;

	srand(time(0));

	inject_offset_blocks = rand() % num_blocks;
	inject_offset_bytes = start_inject_bytes + (rand() % inject_range_bytes);
	inject_offset_bits = rand() % 8;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		if (offset_blocks == inject_offset_blocks) {
			rc = _dif_inject_error(sgl, block_size, num_blocks,
					       inject_offset_blocks,
					       inject_offset_bytes,
					       inject_offset_bits);
			if (rc == 0) {
				*inject_offset = inject_offset_blocks;
			}
			return rc;
		}
	}

	return -1;
}

int
spdk_dif_inject_error(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		      const struct spdk_dif_ctx *ctx, uint32_t inject_flags,
		      uint32_t *inject_offset)
{
	struct _dif_sgl sgl;
	int rc;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      ctx->guard_interval + offsetof(struct spdk_dif, ref_tag),
				      SPDK_SIZEOF_MEMBER(struct spdk_dif, ref_tag),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      ctx->guard_interval + offsetof(struct spdk_dif, app_tag),
				      SPDK_SIZEOF_MEMBER(struct spdk_dif, app_tag),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      ctx->guard_interval,
				      SPDK_SIZEOF_MEMBER(struct spdk_dif, guard),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Guard.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_DATA_ERROR) {
		/* If the DIF information is contained within the last 8 bytes of
		 * metadata, then the CRC covers all metadata bytes up to but excluding
		 * the last 8 bytes. However, error injection does not cover this
		 * metadata because its classification is not determined yet.
		 *
		 * Note: Error injection into the data block is expected to be detected
		 * as a guard error.
		 */
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      0,
				      ctx->block_size - ctx->md_size,
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to data block.\n");
			return rc;
		}
	}

	return 0;
}
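
/* Illustrative sketch (not part of the original source): flipping one random
 * bit of the Guard field in a randomly chosen block, as a test would do, and
 * recording which block was corrupted.
 *
 *	uint32_t inject_offset = 0;
 *	int rc;
 *
 *	rc = spdk_dif_inject_error(&iov, 1, 4, &ctx, SPDK_DIF_GUARD_ERROR,
 *				   &inject_offset);
 *	if (rc == 0) {
 *		SPDK_NOTICELOG("Corrupted Guard of block %" PRIu32 "\n",
 *			       inject_offset);
 *	}
 */
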
static void
dix_generate(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
	     uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks = 0;
	uint16_t guard;
	void *data_buf, *md_buf;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(data_sgl, &data_buf, NULL);
		_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(ctx->guard_seed, data_buf, ctx->block_size);
			guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
		}

		_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);

		_dif_sgl_advance(data_sgl, ctx->block_size);
		_dif_sgl_advance(md_sgl, ctx->md_size);
		offset_blocks++;
	}
}

static void
_dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		    uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_in_block, data_buf_len;
	uint16_t guard = 0;
	void *data_buf, *md_buf;

	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < ctx->block_size) {
		_dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len);
		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(guard, data_buf, data_buf_len);
		}

		_dif_sgl_advance(data_sgl, data_buf_len);
		offset_in_block += data_buf_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
	}

	_dif_sgl_advance(md_sgl, ctx->md_size);

	_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);
}

static void
dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		   uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dix_generate_split(data_sgl, md_sgl, offset_blocks, ctx);
	}
}

int
spdk_dix_generate(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl data_sgl, md_sgl;

	_dif_sgl_init(&data_sgl, iovs, iovcnt);
	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) {
		dix_generate(&data_sgl, &md_sgl, num_blocks, ctx);
	} else {
		dix_generate_split(&data_sgl, &md_sgl, num_blocks, ctx);
	}

	return 0;
}

static int
dix_verify(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
	   uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
	   struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks = 0;
	uint16_t guard;
	void *data_buf, *md_buf;
	int rc;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(data_sgl, &data_buf, NULL);
		_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(ctx->guard_seed, data_buf, ctx->block_size);
			guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
		}

		rc = _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_dif_sgl_advance(data_sgl, ctx->block_size);
		_dif_sgl_advance(md_sgl, ctx->md_size);
		offset_blocks++;
	}

	return 0;
}

static int
_dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		  uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
		  struct spdk_dif_error *err_blk)
{
	uint32_t offset_in_block, data_buf_len;
	uint16_t guard = 0;
	void *data_buf, *md_buf;

	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < ctx->block_size) {
		_dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len);
		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(guard, data_buf, data_buf_len);
		}

		_dif_sgl_advance(data_sgl, data_buf_len);
		offset_in_block += data_buf_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
	}

	_dif_sgl_advance(md_sgl, ctx->md_size);

	return _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
}

static int
dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		 uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		 struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	int rc;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dix_verify_split(data_sgl, md_sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

int
spdk_dix_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		struct spdk_dif_error *err_blk)
{
	struct _dif_sgl data_sgl, md_sgl;

	if (md_iov->iov_base == NULL) {
		SPDK_ERRLOG("Metadata buffer is NULL.\n");
		return -EINVAL;
	}

	_dif_sgl_init(&data_sgl, iovs, iovcnt);
	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) {
		return dix_verify(&data_sgl, &md_sgl, num_blocks, ctx, err_blk);
	} else {
		return dix_verify_split(&data_sgl, &md_sgl, num_blocks, ctx, err_blk);
	}
}
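
/* Illustrative sketch (not part of the original source): the DIX variants
 * keep metadata in a separate buffer. Here the context describes 512-byte
 * data blocks with 8 bytes of separate metadata (md_interleave = false);
 * dif_flags, err_blk and the buffers are example values.
 *
 *	struct spdk_dif_ctx dix_ctx;
 *	struct iovec data_iov = {.iov_base = data_buf, .iov_len = 4 * 512};
 *	struct iovec md_iov = {.iov_base = md_buf, .iov_len = 4 * 8};
 *	int rc;
 *
 *	rc = spdk_dif_ctx_init(&dix_ctx, 512, 8, false, true, SPDK_DIF_TYPE1,
 *			       dif_flags, 0x100, 0xFFFF, 0x1234, 0, 0);
 *	if (rc == 0) {
 *		rc = spdk_dix_generate(&data_iov, 1, &md_iov, 4, &dix_ctx);
 *	}
 *	if (rc == 0) {
 *		rc = spdk_dix_verify(&data_iov, 1, &md_iov, 4, &dix_ctx, &err_blk);
 *	}
 */
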
int
spdk_dix_inject_error(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		      uint32_t inject_flags, uint32_t *inject_offset)
{
	struct _dif_sgl data_sgl, md_sgl;
	int rc;

	_dif_sgl_init(&data_sgl, iovs, iovcnt);
	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
				      ctx->guard_interval + offsetof(struct spdk_dif, ref_tag),
				      SPDK_SIZEOF_MEMBER(struct spdk_dif, ref_tag),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
				      ctx->guard_interval + offsetof(struct spdk_dif, app_tag),
				      SPDK_SIZEOF_MEMBER(struct spdk_dif, app_tag),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
				      ctx->guard_interval,
				      SPDK_SIZEOF_MEMBER(struct spdk_dif, guard),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Guard.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_DATA_ERROR) {
		/* Note: Error injection into the data block is expected to be detected
		 * as a guard error.
		 */
		rc = dif_inject_error(&data_sgl, ctx->block_size, num_blocks,
				      0,
				      ctx->block_size,
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to data block.\n");
			return rc;
		}
	}

	return 0;
}

static uint32_t
_to_next_boundary(uint32_t offset, uint32_t boundary)
{
	return boundary - (offset % boundary);
}

static uint32_t
_to_size_with_md(uint32_t size, uint32_t data_block_size, uint32_t block_size)
{
	return (size / data_block_size) * block_size + (size % data_block_size);
}
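
/* Worked example (added for clarity, 512-byte data blocks in 520-byte
 * extended blocks): _to_size_with_md(1200, 512, 520) returns
 * (1200 / 512) * 520 + (1200 % 512) = 2 * 520 + 176 = 1216, i.e. 1200 bytes
 * of data occupy two full extended blocks plus 176 bytes of the third.
 */
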
int
spdk_dif_set_md_interleave_iovs(struct iovec *iovs, int iovcnt,
				struct iovec *buf_iovs, int buf_iovcnt,
				uint32_t data_offset, uint32_t data_len,
				uint32_t *_mapped_len,
				const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_len, buf_offset, len;
	struct _dif_sgl dif_sgl;
	struct _dif_sgl buf_sgl;

	if (iovs == NULL || iovcnt == 0 || buf_iovs == NULL || buf_iovcnt == 0) {
		return -EINVAL;
	}

	data_block_size = ctx->block_size - ctx->md_size;

	data_unalign = ctx->data_offset % data_block_size;

	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
				   ctx->block_size);
	buf_len -= data_unalign;

	_dif_sgl_init(&dif_sgl, iovs, iovcnt);
	_dif_sgl_init(&buf_sgl, buf_iovs, buf_iovcnt);

	if (!_dif_sgl_is_valid(&buf_sgl, buf_len)) {
		SPDK_ERRLOG("Buffer overflow will occur.\n");
		return -ERANGE;
	}

	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
	buf_offset -= data_unalign;

	_dif_sgl_advance(&buf_sgl, buf_offset);

	while (data_len != 0) {
		len = spdk_min(data_len,
			       _to_next_boundary(ctx->data_offset + data_offset, data_block_size));
		if (!_dif_sgl_append_split(&dif_sgl, &buf_sgl, len)) {
			break;
		}
		_dif_sgl_advance(&buf_sgl, ctx->md_size);
		data_offset += len;
		data_len -= len;
	}

	if (_mapped_len != NULL) {
		*_mapped_len = dif_sgl.total_size;
	}

	return iovcnt - dif_sgl.iovcnt;
}

static int
_dif_sgl_setup_stream(struct _dif_sgl *sgl, uint32_t *_buf_offset, uint32_t *_buf_len,
		      uint32_t data_offset, uint32_t data_len,
		      const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_len, buf_offset;

	data_block_size = ctx->block_size - ctx->md_size;

	data_unalign = ctx->data_offset % data_block_size;

	/* If the last data block is complete, DIF of the data block is
	 * inserted or verified in this turn.
	 */
	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
				   ctx->block_size);
	buf_len -= data_unalign;

	if (!_dif_sgl_is_valid(sgl, buf_len)) {
		return -ERANGE;
	}

	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
	buf_offset -= data_unalign;

	_dif_sgl_advance(sgl, buf_offset);
	buf_len -= buf_offset;

	buf_offset += data_unalign;

	*_buf_offset = buf_offset;
	*_buf_len = buf_len;

	return 0;
}

int
spdk_dif_generate_stream(struct iovec *iovs, int iovcnt,
			 uint32_t data_offset, uint32_t data_len,
			 struct spdk_dif_ctx *ctx)
{
	uint32_t buf_len = 0, buf_offset = 0;
	uint32_t len, offset_in_block, offset_blocks;
	uint16_t guard = 0;
	struct _dif_sgl sgl;
	int rc;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->last_guard;
	}

	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;
		offset_blocks = buf_offset / ctx->block_size;

		guard = _dif_generate_split(&sgl, offset_in_block, len, guard, offset_blocks, ctx);

		buf_len -= len;
		buf_offset += len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		ctx->last_guard = guard;
	}

	return 0;
}
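
/* Illustrative sketch (not part of the original source): generating DIF
 * incrementally as data arrives in arbitrarily sized chunks, e.g. from a
 * socket. The interleaved 520/8 context from the earlier example is assumed
 * and the chunk lengths are example values; the running Guard is carried in
 * ctx->last_guard between calls.
 *
 *	uint32_t chunk_lens[3] = {100, 700, 224};
 *	uint32_t offset = 0;
 *	int i, rc;
 *
 *	for (i = 0; i < 3; i++) {
 *		rc = spdk_dif_generate_stream(&iov, 1, offset, chunk_lens[i], &ctx);
 *		if (rc != 0) {
 *			return rc;
 *		}
 *		offset += chunk_lens[i];
 *	}
 */
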
int
spdk_dif_verify_stream(struct iovec *iovs, int iovcnt,
		       uint32_t data_offset, uint32_t data_len,
		       struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk)
{
	uint32_t buf_len = 0, buf_offset = 0;
	uint32_t len, offset_in_block, offset_blocks;
	uint16_t guard = 0;
	struct _dif_sgl sgl;
	int rc = 0;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->last_guard;
	}

	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;
		offset_blocks = buf_offset / ctx->block_size;

		rc = _dif_verify_split(&sgl, offset_in_block, len, &guard, offset_blocks,
				       ctx, err_blk);
		if (rc != 0) {
			goto error;
		}

		buf_len -= len;
		buf_offset += len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		ctx->last_guard = guard;
	}
error:
	return rc;
}

int
spdk_dif_update_crc32c_stream(struct iovec *iovs, int iovcnt,
			      uint32_t data_offset, uint32_t data_len,
			      uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t buf_len = 0, buf_offset = 0, len, offset_in_block;
	uint32_t crc32c;
	struct _dif_sgl sgl;
	int rc;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	crc32c = *_crc32c;
	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;

		crc32c = _dif_update_crc32c_split(&sgl, offset_in_block, len, crc32c, ctx);

		buf_len -= len;
		buf_offset += len;
	}

	*_crc32c = crc32c;

	return 0;
}

void
spdk_dif_get_range_with_md(uint32_t data_offset, uint32_t data_len,
			   uint32_t *_buf_offset, uint32_t *_buf_len,
			   const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_offset, buf_len;

	if (!ctx->md_interleave) {
		buf_offset = data_offset;
		buf_len = data_len;
	} else {
		data_block_size = ctx->block_size - ctx->md_size;

		data_unalign = data_offset % data_block_size;

		buf_offset = _to_size_with_md(data_offset, data_block_size, ctx->block_size);
		buf_len = _to_size_with_md(data_unalign + data_len, data_block_size, ctx->block_size) -
			  data_unalign;
	}

	if (_buf_offset != NULL) {
		*_buf_offset = buf_offset;
	}

	if (_buf_len != NULL) {
		*_buf_len = buf_len;
	}
}

uint32_t
spdk_dif_get_length_with_md(uint32_t data_len, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size;

	if (!ctx->md_interleave) {
		return data_len;
	} else {
		data_block_size = ctx->block_size - ctx->md_size;

		return _to_size_with_md(data_len, data_block_size, ctx->block_size);
	}
}
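
/* Worked example (added for clarity, 520/8 interleaved format):
 * spdk_dif_get_length_with_md(1024, ctx) returns 1040, because 1024 bytes of
 * data occupy two full 520-byte extended blocks. spdk_dif_get_range_with_md(600,
 * 100, &buf_offset, &buf_len, ctx) reports buf_offset = 608 and buf_len = 100:
 * the requested data offset of 600 lies 88 bytes into the second data block,
 * which starts 520 bytes into the buffer, and the 100-byte range does not
 * cross a block boundary, so no metadata is included in the length.
 */
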
static int
_dif_remap_ref_tag(struct _dif_sgl *sgl, uint32_t offset_blocks,
		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset, buf_len, expected = 0, _actual, remapped;
	void *buf;
	struct _dif_sgl tmp_sgl;
	struct spdk_dif dif;

	/* Fast forward to DIF field. */
	_dif_sgl_advance(sgl, ctx->guard_interval);
	_dif_sgl_copy(&tmp_sgl, sgl);

	/* Copy the split DIF field to the temporary DIF buffer */
	offset = 0;
	while (offset < sizeof(struct spdk_dif)) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset);

		memcpy((uint8_t *)&dif + offset, buf, buf_len);

		_dif_sgl_advance(sgl, buf_len);
		offset += buf_len;
	}

	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* If Type 1 or 2 is used, then all DIF checks are disabled when
		 * the Application Tag is 0xFFFF.
		 */
		if (dif.app_tag == 0xFFFF) {
			goto end;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* If Type 3 is used, then all DIF checks are disabled when the
		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF.
		 */
		if (dif.app_tag == 0xFFFF && dif.ref_tag == 0xFFFFFFFF) {
			goto end;
		}
		break;
	default:
		break;
	}

	/* For type 1 and 2, the Reference Tag is incremented for each
	 * subsequent logical block. For type 3, the Reference Tag
	 * remains the same as the initial Reference Tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		remapped = ctx->remapped_init_ref_tag;
	}

	/* Verify the stored Reference Tag. */
	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* Compare the DIF Reference Tag field to the computed Reference Tag.
		 * The computed Reference Tag will be the least significant 4 bytes
		 * of the LBA when Type 1 is used, and an application-specific value
		 * if Type 2 is used.
		 */
		_actual = from_be32(&dif.ref_tag);
		if (_actual != expected) {
			_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected,
				       _actual, offset_blocks);
			SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu32 "," \
				    " Expected=%x, Actual=%x\n",
				    expected, expected, _actual);
			return -1;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* For Type 3, the computed Reference Tag remains unchanged.
		 * Hence ignore the Reference Tag field.
		 */
		break;
	default:
		break;
	}

	/* Update the stored Reference Tag to the remapped one. */
	to_be32(&dif.ref_tag, remapped);

	offset = 0;
	while (offset < sizeof(struct spdk_dif)) {
		_dif_sgl_get_buf(&tmp_sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset);

		memcpy(buf, (uint8_t *)&dif + offset, buf_len);

		_dif_sgl_advance(&tmp_sgl, buf_len);
		offset += buf_len;
	}

end:
	_dif_sgl_advance(sgl, ctx->block_size - ctx->guard_interval - sizeof(struct spdk_dif));

	return 0;
}

int
spdk_dif_remap_ref_tag(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct _dif_sgl sgl;
	uint32_t offset_blocks;
	int rc;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
		return 0;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_remap_ref_tag(&sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
_dix_remap_ref_tag(struct _dif_sgl *md_sgl, uint32_t offset_blocks,
		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t expected = 0, _actual, remapped;
	uint8_t *md_buf;
	struct spdk_dif *dif;

	_dif_sgl_get_buf(md_sgl, (void *)&md_buf, NULL);

	dif = (struct spdk_dif *)(md_buf + ctx->guard_interval);

	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* If Type 1 or 2 is used, then all DIF checks are disabled when
		 * the Application Tag is 0xFFFF.
		 */
		if (dif->app_tag == 0xFFFF) {
			goto end;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* If Type 3 is used, then all DIF checks are disabled when the
		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF.
		 */
		if (dif->app_tag == 0xFFFF && dif->ref_tag == 0xFFFFFFFF) {
			goto end;
		}
		break;
	default:
		break;
	}

	/* For type 1 and 2, the Reference Tag is incremented for each
	 * subsequent logical block. For type 3, the Reference Tag
	 * remains the same as the initial Reference Tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		remapped = ctx->remapped_init_ref_tag;
	}

	/* Verify the stored Reference Tag. */
	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* Compare the DIF Reference Tag field to the computed Reference Tag.
		 * The computed Reference Tag will be the least significant 4 bytes
		 * of the LBA when Type 1 is used, and an application-specific value
		 * if Type 2 is used.
		 */
		_actual = from_be32(&dif->ref_tag);
		if (_actual != expected) {
			_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected,
				       _actual, offset_blocks);
			SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu32 "," \
				    " Expected=%x, Actual=%x\n",
				    expected, expected, _actual);
			return -1;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* For Type 3, the computed Reference Tag remains unchanged.
		 * Hence ignore the Reference Tag field.
		 */
		break;
	default:
		break;
	}

	/* Update the stored Reference Tag to the remapped one. */
	to_be32(&dif->ref_tag, remapped);

end:
	_dif_sgl_advance(md_sgl, ctx->md_size);

	return 0;
}

int
spdk_dix_remap_ref_tag(struct iovec *md_iov, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk)
{
	struct _dif_sgl md_sgl;
	uint32_t offset_blocks;
	int rc;

	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of metadata iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
		return 0;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dix_remap_ref_tag(&md_sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}
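
/* Illustrative sketch (not part of the original source): remapping stored
 * Reference Tags when a request is forwarded with a different starting LBA.
 * The interleaved context and iovec from the earlier examples are assumed;
 * 0x200 is an arbitrary remapped initial Reference Tag.
 *
 *	spdk_dif_ctx_set_remapped_init_ref_tag(&ctx, 0x200);
 *	rc = spdk_dif_remap_ref_tag(&iov, 1, 4, &ctx, &err_blk);
 */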