/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/dif.h"
#include "spdk/crc16.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/util.h"

/* Context to iterate an iovec array. */
struct _iov_iter {
	/* Current iovec in the iteration */
	struct iovec *iov;

	/* Remaining count of iovecs in the iteration. */
	int iovcnt;

	/* Current offset in the iovec */
	uint32_t iov_offset;
};

static inline void
_iov_iter_init(struct _iov_iter *i, struct iovec *iovs, int iovcnt)
{
	i->iov = iovs;
	i->iovcnt = iovcnt;
	i->iov_offset = 0;
}
static inline void
_iov_iter_advance(struct _iov_iter *i, uint32_t step)
{
	i->iov_offset += step;
	if (i->iov_offset == i->iov->iov_len) {
		i->iov++;
		assert(i->iovcnt > 0);
		i->iovcnt--;
		i->iov_offset = 0;
	}
}

static inline void
_iov_iter_get_buf(struct _iov_iter *i, void **_buf, uint32_t *_buf_len)
{
	if (_buf != NULL) {
		*_buf = i->iov->iov_base + i->iov_offset;
	}
	if (_buf_len != NULL) {
		*_buf_len = i->iov->iov_len - i->iov_offset;
	}
}

static void
_iov_iter_fast_forward(struct _iov_iter *i, uint32_t offset)
{
	i->iov_offset = offset;
	while (i->iovcnt != 0) {
		if (i->iov_offset < i->iov->iov_len) {
			break;
		}

		i->iov_offset -= i->iov->iov_len;
		i->iov++;
		i->iovcnt--;
	}
}

static bool
_are_iovs_bytes_multiple(struct iovec *iovs, int iovcnt, uint32_t bytes)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		if (iovs[i].iov_len % bytes) {
			return false;
		}
	}

	return true;
}

static bool
_are_iovs_valid(struct iovec *iovs, int iovcnt, uint32_t bytes)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < iovcnt; i++) {
		total += iovs[i].iov_len;
	}

	return total >= bytes;
}

static bool
_dif_type_is_valid(enum spdk_dif_type dif_type, uint32_t dif_flags)
{
	switch (dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
	case SPDK_DIF_DISABLE:
		break;
	case SPDK_DIF_TYPE3:
		if (dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
			SPDK_ERRLOG("Reference Tag should not be checked for Type 3\n");
			return false;
		}
		break;
	default:
		SPDK_ERRLOG("Unknown DIF Type: %d\n", dif_type);
		return false;
	}

	return true;
}

static bool
_dif_is_disabled(enum spdk_dif_type dif_type)
{
	if (dif_type == SPDK_DIF_DISABLE) {
		return true;
	} else {
		return false;
	}
}

static uint32_t
_get_guard_interval(uint32_t block_size, uint32_t md_size, bool dif_loc, bool md_interleave)
{
	if (!dif_loc) {
		/* For metadata formats with more than 8 bytes, if the DIF is
		 * contained in the last 8 bytes of metadata, then the CRC
		 * covers all metadata up to but excluding these last 8 bytes.
		 */
		if (md_interleave) {
			return block_size - sizeof(struct spdk_dif);
		} else {
			return md_size - sizeof(struct spdk_dif);
		}
	} else {
		/* For metadata formats with more than 8 bytes, if the DIF is
		 * contained in the first 8 bytes of metadata, then the CRC
		 * does not cover any metadata.
		 */
		if (md_interleave) {
			return block_size - md_size;
		} else {
			return 0;
		}
	}
}
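
/* Example: for a 520-byte interleaved block carrying 8 bytes of metadata
 * with the DIF in the last 8 bytes (dif_loc == false), the guard interval
 * is 520 - 8 = 512, i.e. the Guard CRC covers exactly the 512 data bytes
 * and the DIF starts at offset 512. For a separate 16-byte metadata buffer
 * (md_interleave == false) with the DIF in the last 8 bytes, the guard
 * interval is 16 - 8 = 8, so the DIF starts at offset 8 of the metadata
 * buffer and the CRC additionally covers the first 8 metadata bytes.
 */
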
int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint16_t guard_seed)
{
	if (md_size < sizeof(struct spdk_dif)) {
		SPDK_ERRLOG("Metadata size is smaller than DIF size.\n");
		return -EINVAL;
	}

	if (md_interleave) {
		if (block_size < md_size) {
			SPDK_ERRLOG("Block size is smaller than metadata size.\n");
			return -EINVAL;
		}
	} else {
		if (block_size == 0 || (block_size % 512) != 0) {
			SPDK_ERRLOG("Block size must be a non-zero multiple of 512.\n");
			return -EINVAL;
		}
	}

	if (!_dif_type_is_valid(dif_type, dif_flags)) {
		SPDK_ERRLOG("DIF type is invalid.\n");
		return -EINVAL;
	}

	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->guard_interval = _get_guard_interval(block_size, md_size, dif_loc, md_interleave);
	ctx->dif_type = dif_type;
	ctx->dif_flags = dif_flags;
	ctx->init_ref_tag = init_ref_tag;
	ctx->apptag_mask = apptag_mask;
	ctx->app_tag = app_tag;
	ctx->guard_seed = guard_seed;

	return 0;
}
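
/*
 * Illustrative usage (values and the start_lba variable are examples only):
 * a 512-byte data block with 8 bytes of interleaved metadata, Type 1
 * protection, DIF placed in the last 8 bytes of metadata (dif_loc == false),
 * all checks enabled.
 *
 *	rc = spdk_dif_ctx_init(&ctx, 520, 8, true, false, SPDK_DIF_TYPE1,
 *			       SPDK_DIF_FLAGS_GUARD_CHECK |
 *			       SPDK_DIF_FLAGS_APPTAG_CHECK |
 *			       SPDK_DIF_FLAGS_REFTAG_CHECK,
 *			       start_lba, 0xFFFF, 0x88, 0);
 */
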
static void
_dif_generate(void *_dif, uint16_t guard, uint32_t offset_blocks,
	      const struct spdk_dif_ctx *ctx)
{
	struct spdk_dif *dif = _dif;
	uint32_t ref_tag;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		to_be16(&dif->guard, guard);
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
		to_be16(&dif->app_tag, ctx->app_tag);
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
		/* For type 1 and 2, the reference tag is incremented for each
		 * subsequent logical block. For type 3, the reference tag
		 * remains the same as the initial reference tag.
		 */
		if (ctx->dif_type != SPDK_DIF_TYPE3) {
			ref_tag = ctx->init_ref_tag + offset_blocks;
		} else {
			ref_tag = ctx->init_ref_tag;
		}

		to_be32(&dif->ref_tag, ref_tag);
	}
}

static void
dif_generate(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
	     const struct spdk_dif_ctx *ctx)
{
	struct _iov_iter iter;
	uint32_t offset_blocks;
	void *buf;
	uint16_t guard = 0;

	offset_blocks = 0;
	_iov_iter_init(&iter, iovs, iovcnt);

	while (offset_blocks < num_blocks) {
		_iov_iter_get_buf(&iter, &buf, NULL);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(ctx->guard_seed, buf, ctx->guard_interval);
		}

		_dif_generate(buf + ctx->guard_interval, guard, offset_blocks, ctx);

		_iov_iter_advance(&iter, ctx->block_size);
		offset_blocks++;
	}
}
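
/* Generate a DIF for one logical block that may straddle iovec boundaries:
 * the Guard CRC is accumulated fragment by fragment, the DIF is staged in a
 * temporary struct once the whole data portion has been consumed, and the
 * staged 8 bytes are then copied piecewise into the (possibly split) DIF
 * field.
 */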
static void
_dif_generate_split(struct _iov_iter *iter, uint32_t offset_blocks,
		    const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_in_block, offset_in_dif, buf_len;
	void *buf;
	uint16_t guard = 0;
	struct spdk_dif dif = {};

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < ctx->block_size) {
		_iov_iter_get_buf(iter, &buf, &buf_len);

		if (offset_in_block < ctx->guard_interval) {
			buf_len = spdk_min(buf_len, ctx->guard_interval - offset_in_block);

			if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
				/* Compute CRC over split logical block data. */
				guard = spdk_crc16_t10dif(guard, buf, buf_len);
			}

			if (offset_in_block + buf_len == ctx->guard_interval) {
				/* Once the whole logical block data has been parsed,
				 * generate the DIF and save it to the temporary DIF area.
				 */
				_dif_generate(&dif, guard, offset_blocks, ctx);
			}
		} else if (offset_in_block < ctx->guard_interval + sizeof(struct spdk_dif)) {
			/* Copy generated DIF to the split DIF field. */
			offset_in_dif = offset_in_block - ctx->guard_interval;
			buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset_in_dif);

			memcpy(buf, ((uint8_t *)&dif) + offset_in_dif, buf_len);
		} else {
			/* Skip metadata field after DIF field. */
			buf_len = spdk_min(buf_len, ctx->block_size - offset_in_block);
		}

		_iov_iter_advance(iter, buf_len);
		offset_in_block += buf_len;
	}
}

static void
dif_generate_split(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		   const struct spdk_dif_ctx *ctx)
{
	struct _iov_iter iter;
	uint32_t offset_blocks;

	offset_blocks = 0;
	_iov_iter_init(&iter, iovs, iovcnt);

	while (offset_blocks < num_blocks) {
		_dif_generate_split(&iter, offset_blocks, ctx);
		offset_blocks++;
	}
}

int
spdk_dif_generate(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		  const struct spdk_dif_ctx *ctx)
{
	if (!_are_iovs_valid(iovs, iovcnt, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_are_iovs_bytes_multiple(iovs, iovcnt, ctx->block_size)) {
		dif_generate(iovs, iovcnt, num_blocks, ctx);
	} else {
		dif_generate_split(iovs, iovcnt, num_blocks, ctx);
	}

	return 0;
}
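
/*
 * Illustrative usage (names are examples only; assumes ctx was initialized
 * for 520-byte interleaved blocks): protect four blocks in place and verify
 * them afterwards.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4 * 520 };
 *	struct spdk_dif_error err_blk;
 *
 *	spdk_dif_generate(&iov, 1, 4, &ctx);
 *	rc = spdk_dif_verify(&iov, 1, 4, &ctx, &err_blk);
 */
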
static void
_dif_error_set(struct spdk_dif_error *err_blk, uint8_t err_type,
	       uint32_t expected, uint32_t actual, uint32_t err_offset)
{
	if (err_blk) {
		err_blk->err_type = err_type;
		err_blk->expected = expected;
		err_blk->actual = actual;
		err_blk->err_offset = err_offset;
	}
}
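
/* Verify the Guard, Application Tag, and Reference Tag fields of a single
 * DIF according to ctx->dif_flags. On the first mismatch the expected and
 * actual values are recorded in err_blk (if provided) and -1 is returned.
 */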
static int
_dif_verify(void *_dif, uint16_t guard, uint32_t offset_blocks,
	    const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct spdk_dif *dif = _dif;
	uint16_t _guard;
	uint16_t _app_tag;
	uint32_t ref_tag, _ref_tag;

	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* If Type 1 or 2 is used, then all DIF checks are disabled when
		 * the Application Tag is 0xFFFF.
		 */
		if (dif->app_tag == 0xFFFF) {
			return 0;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* If Type 3 is used, then all DIF checks are disabled when the
		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF.
		 */
		if (dif->app_tag == 0xFFFF && dif->ref_tag == 0xFFFFFFFF) {
			return 0;
		}
		break;
	default:
		break;
	}

	/* For type 1 and 2, the reference tag is incremented for each
	 * subsequent logical block. For type 3, the reference tag
	 * remains the same as the initial reference tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		ref_tag = ctx->init_ref_tag + offset_blocks;
	} else {
		ref_tag = ctx->init_ref_tag;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		/* Compare the DIF Guard field to the CRC computed over the logical
		 * block data.
		 */
		_guard = from_be16(&dif->guard);
		if (_guard != guard) {
			_dif_error_set(err_blk, SPDK_DIF_GUARD_ERROR, _guard, guard,
				       offset_blocks);
			SPDK_ERRLOG("Failed to compare Guard: LBA=%" PRIu32 "," \
				    " Expected=%x, Actual=%x\n",
				    ref_tag, _guard, guard);
			return -1;
		}
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
		/* Compare unmasked bits in the DIF Application Tag field to the
		 * passed Application Tag.
		 */
		_app_tag = from_be16(&dif->app_tag);
		if ((_app_tag & ctx->apptag_mask) != ctx->app_tag) {
			_dif_error_set(err_blk, SPDK_DIF_APPTAG_ERROR, ctx->app_tag,
				       (_app_tag & ctx->apptag_mask), offset_blocks);
			SPDK_ERRLOG("Failed to compare App Tag: LBA=%" PRIu32 "," \
				    " Expected=%x, Actual=%x\n",
				    ref_tag, ctx->app_tag, (_app_tag & ctx->apptag_mask));
			return -1;
		}
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
		switch (ctx->dif_type) {
		case SPDK_DIF_TYPE1:
		case SPDK_DIF_TYPE2:
			/* Compare the DIF Reference Tag field to the passed Reference Tag.
			 * The passed Reference Tag will be the least significant 4 bytes
			 * of the LBA when Type 1 is used, and an application-specific value
			 * if Type 2 is used.
			 */
			_ref_tag = from_be32(&dif->ref_tag);
			if (_ref_tag != ref_tag) {
				_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, ref_tag,
					       _ref_tag, offset_blocks);
				SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu32 "," \
					    " Expected=%x, Actual=%x\n",
					    ref_tag, ref_tag, _ref_tag);
				return -1;
			}
			break;
		case SPDK_DIF_TYPE3:
			/* For Type 3, computed Reference Tag remains unchanged.
			 * Hence ignore the Reference Tag field.
			 */
			break;
		default:
			break;
		}
	}

	return 0;
}

static int
dif_verify(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
	   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct _iov_iter iter;
	uint32_t offset_blocks;
	int rc;
	void *buf;
	uint16_t guard = 0;

	offset_blocks = 0;
	_iov_iter_init(&iter, iovs, iovcnt);

	while (offset_blocks < num_blocks) {
		_iov_iter_get_buf(&iter, &buf, NULL);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(ctx->guard_seed, buf, ctx->guard_interval);
		}

		rc = _dif_verify(buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_iov_iter_advance(&iter, ctx->block_size);
		offset_blocks++;
	}

	return 0;
}

static int
_dif_verify_split(struct _iov_iter *iter, uint32_t offset_blocks,
		  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset_in_block, offset_in_dif, buf_len;
	void *buf;
	uint16_t guard = 0;
	struct spdk_dif dif = {};

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < ctx->block_size) {
		_iov_iter_get_buf(iter, &buf, &buf_len);

		if (offset_in_block < ctx->guard_interval) {
			buf_len = spdk_min(buf_len, ctx->guard_interval - offset_in_block);

			if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
				/* Compute CRC over split logical block data. */
				guard = spdk_crc16_t10dif(guard, buf, buf_len);
			}
		} else if (offset_in_block < ctx->guard_interval + sizeof(struct spdk_dif)) {
			/* Copy the split DIF field to the temporary DIF buffer. */
			offset_in_dif = offset_in_block - ctx->guard_interval;
			buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset_in_dif);

			memcpy((uint8_t *)&dif + offset_in_dif, buf, buf_len);
		} else {
			/* Skip metadata field after DIF field. */
			buf_len = spdk_min(buf_len, ctx->block_size - offset_in_block);
		}

		_iov_iter_advance(iter, buf_len);
		offset_in_block += buf_len;
	}

	return _dif_verify(&dif, guard, offset_blocks, ctx, err_blk);
}

static int
dif_verify_split(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct _iov_iter iter;
	uint32_t offset_blocks;
	int rc;

	offset_blocks = 0;
	_iov_iter_init(&iter, iovs, iovcnt);

	while (offset_blocks < num_blocks) {
		rc = _dif_verify_split(&iter, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
		offset_blocks++;
	}

	return 0;
}

int
spdk_dif_verify(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	if (!_are_iovs_valid(iovs, iovcnt, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_are_iovs_bytes_multiple(iovs, iovcnt, ctx->block_size)) {
		return dif_verify(iovs, iovcnt, num_blocks, ctx, err_blk);
	} else {
		return dif_verify_split(iovs, iovcnt, num_blocks, ctx, err_blk);
	}
}
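
/* The "copy" variants below combine DIF generation or verification with a
 * data copy between an unprotected data buffer (iovs) and a single bounce
 * buffer that holds the interleaved block-plus-metadata format.
 */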
static void
dif_generate_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	struct _iov_iter src_iter, dst_iter;
	uint32_t offset_blocks, data_block_size;
	void *src, *dst;
	uint16_t guard;

	offset_blocks = 0;
	_iov_iter_init(&src_iter, iovs, iovcnt);
	_iov_iter_init(&dst_iter, bounce_iov, 1);

	data_block_size = ctx->block_size - ctx->md_size;

	while (offset_blocks < num_blocks) {

		_iov_iter_get_buf(&src_iter, &src, NULL);
		_iov_iter_get_buf(&dst_iter, &dst, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif_copy(ctx->guard_seed, dst, src, data_block_size);
			guard = spdk_crc16_t10dif(guard, dst + data_block_size,
						  ctx->guard_interval - data_block_size);
		} else {
			memcpy(dst, src, data_block_size);
		}

		_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);

		_iov_iter_advance(&src_iter, data_block_size);
		_iov_iter_advance(&dst_iter, ctx->block_size);
		offset_blocks++;
	}
}

static void
_dif_generate_copy_split(struct _iov_iter *src_iter, struct _iov_iter *dst_iter,
			 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_in_block, src_len, data_block_size;
	uint16_t guard = 0;
	void *src, *dst;

	_iov_iter_get_buf(dst_iter, &dst, NULL);

	data_block_size = ctx->block_size - ctx->md_size;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < data_block_size) {
		/* Compute CRC over split logical block data and copy
		 * data to the bounce buffer.
		 */
		_iov_iter_get_buf(src_iter, &src, &src_len);
		src_len = spdk_min(src_len, data_block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif_copy(guard, dst + offset_in_block,
						       src, src_len);
		} else {
			memcpy(dst + offset_in_block, src, src_len);
		}

		_iov_iter_advance(src_iter, src_len);
		offset_in_block += src_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = spdk_crc16_t10dif(guard, dst + data_block_size,
					  ctx->guard_interval - data_block_size);
	}

	_iov_iter_advance(dst_iter, ctx->block_size);

	_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);
}

static void
dif_generate_copy_split(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
			uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	struct _iov_iter src_iter, dst_iter;
	uint32_t offset_blocks;

	offset_blocks = 0;
	_iov_iter_init(&src_iter, iovs, iovcnt);
	_iov_iter_init(&dst_iter, bounce_iov, 1);

	while (offset_blocks < num_blocks) {
		_dif_generate_copy_split(&src_iter, &dst_iter, offset_blocks, ctx);
		offset_blocks++;
	}
}

int
spdk_dif_generate_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
		       uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size;

	data_block_size = ctx->block_size - ctx->md_size;

	if (!_are_iovs_valid(iovs, iovcnt, data_block_size * num_blocks) ||
	    !_are_iovs_valid(bounce_iov, 1, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Sizes of iovec arrays are not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_are_iovs_bytes_multiple(iovs, iovcnt, data_block_size)) {
		dif_generate_copy(iovs, iovcnt, bounce_iov, num_blocks, ctx);
	} else {
		dif_generate_copy_split(iovs, iovcnt, bounce_iov, num_blocks, ctx);
	}

	return 0;
}
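
/*
 * Illustrative usage (names are examples only; assumes ctx was initialized
 * with block_size = 520 and md_size = 8): copy four 512-byte data blocks
 * into a 4 * 520-byte bounce buffer while inserting DIF, then verify and
 * copy them back.
 *
 *	struct iovec data_iov = { .iov_base = data, .iov_len = 4 * 512 };
 *	struct iovec bounce_iov = { .iov_base = bounce, .iov_len = 4 * 520 };
 *
 *	spdk_dif_generate_copy(&data_iov, 1, &bounce_iov, 4, &ctx);
 *	rc = spdk_dif_verify_copy(&data_iov, 1, &bounce_iov, 4, &ctx, &err_blk);
 */
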
static int
dif_verify_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		struct spdk_dif_error *err_blk)
{
	struct _iov_iter src_iter, dst_iter;
	uint32_t offset_blocks, data_block_size;
	void *src, *dst;
	int rc;
	uint16_t guard;

	offset_blocks = 0;
	_iov_iter_init(&src_iter, bounce_iov, 1);
	_iov_iter_init(&dst_iter, iovs, iovcnt);

	data_block_size = ctx->block_size - ctx->md_size;

	while (offset_blocks < num_blocks) {

		_iov_iter_get_buf(&src_iter, &src, NULL);
		_iov_iter_get_buf(&dst_iter, &dst, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif_copy(ctx->guard_seed, dst, src, data_block_size);
			guard = spdk_crc16_t10dif(guard, src + data_block_size,
						  ctx->guard_interval - data_block_size);
		} else {
			memcpy(dst, src, data_block_size);
		}

		rc = _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_iov_iter_advance(&src_iter, ctx->block_size);
		_iov_iter_advance(&dst_iter, data_block_size);
		offset_blocks++;
	}

	return 0;
}

static int
_dif_verify_copy_split(struct _iov_iter *src_iter, struct _iov_iter *dst_iter,
		       uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk)
{
	uint32_t offset_in_block, dst_len, data_block_size;
	uint16_t guard = 0;
	void *src, *dst;

	_iov_iter_get_buf(src_iter, &src, NULL);

	data_block_size = ctx->block_size - ctx->md_size;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < data_block_size) {
		/* Compute CRC over split logical block data and copy
		 * data from the bounce buffer to the data buffer.
		 */
		_iov_iter_get_buf(dst_iter, &dst, &dst_len);
		dst_len = spdk_min(dst_len, data_block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif_copy(guard, dst,
						       src + offset_in_block, dst_len);
		} else {
			memcpy(dst, src + offset_in_block, dst_len);
		}

		_iov_iter_advance(dst_iter, dst_len);
		offset_in_block += dst_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = spdk_crc16_t10dif(guard, src + data_block_size,
					  ctx->guard_interval - data_block_size);
	}

	_iov_iter_advance(src_iter, ctx->block_size);

	return _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
}

static int
dif_verify_copy_split(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		      struct spdk_dif_error *err_blk)
{
	struct _iov_iter src_iter, dst_iter;
	uint32_t offset_blocks;
	int rc;

	offset_blocks = 0;
	_iov_iter_init(&src_iter, bounce_iov, 1);
	_iov_iter_init(&dst_iter, iovs, iovcnt);

	while (offset_blocks < num_blocks) {
		rc = _dif_verify_copy_split(&src_iter, &dst_iter, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
		offset_blocks++;
	}

	return 0;
}

int
spdk_dif_verify_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
		     uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		     struct spdk_dif_error *err_blk)
{
	uint32_t data_block_size;

	data_block_size = ctx->block_size - ctx->md_size;

	if (!_are_iovs_valid(iovs, iovcnt, data_block_size * num_blocks) ||
	    !_are_iovs_valid(bounce_iov, 1, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Sizes of iovec arrays are not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_are_iovs_bytes_multiple(iovs, iovcnt, data_block_size)) {
		return dif_verify_copy(iovs, iovcnt, bounce_iov, num_blocks, ctx, err_blk);
	} else {
		return dif_verify_copy_split(iovs, iovcnt, bounce_iov, num_blocks, ctx, err_blk);
	}
}
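
/* Error injection helpers: flip one randomly chosen bit inside the requested
 * byte range of a randomly chosen block, so that the corruption can later be
 * detected by the corresponding verify function.
 */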
static void
_bit_flip(uint8_t *buf, uint32_t flip_bit)
{
	uint8_t byte;

	byte = *buf;
	byte ^= 1 << flip_bit;
	*buf = byte;
}

static int
_dif_inject_error(struct iovec *iovs, int iovcnt,
		  uint32_t block_size, uint32_t num_blocks,
		  uint32_t inject_offset_blocks,
		  uint32_t inject_offset_bytes,
		  uint32_t inject_offset_bits)
{
	struct _iov_iter iter;
	uint32_t offset_in_block, buf_len;
	void *buf;

	_iov_iter_init(&iter, iovs, iovcnt);

	_iov_iter_fast_forward(&iter, block_size * inject_offset_blocks);

	offset_in_block = 0;

	while (offset_in_block < block_size) {
		_iov_iter_get_buf(&iter, &buf, &buf_len);
		buf_len = spdk_min(buf_len, block_size - offset_in_block);

		if (inject_offset_bytes >= offset_in_block &&
		    inject_offset_bytes < offset_in_block + buf_len) {
			buf += inject_offset_bytes - offset_in_block;
			_bit_flip(buf, inject_offset_bits);
			return 0;
		}

		_iov_iter_advance(&iter, buf_len);
		offset_in_block += buf_len;
	}

	return -1;
}

static int
dif_inject_error(struct iovec *iovs, int iovcnt,
		 uint32_t block_size, uint32_t num_blocks,
		 uint32_t start_inject_bytes, uint32_t inject_range_bytes,
		 uint32_t *inject_offset)
{
	uint32_t inject_offset_blocks, inject_offset_bytes, inject_offset_bits;
	uint32_t offset_blocks;
	int rc;

	srand(time(0));

	inject_offset_blocks = rand() % num_blocks;
	inject_offset_bytes = start_inject_bytes + (rand() % inject_range_bytes);
	inject_offset_bits = rand() % 8;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		if (offset_blocks == inject_offset_blocks) {
			rc = _dif_inject_error(iovs, iovcnt, block_size, num_blocks,
					       inject_offset_blocks,
					       inject_offset_bytes,
					       inject_offset_bits);
			if (rc == 0) {
				*inject_offset = inject_offset_blocks;
			}
			return rc;
		}
	}

	return -1;
}

#define _member_size(type, member)	sizeof(((type *)0)->member)
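
/*
 * Illustrative usage (names are examples only): corrupt the Guard field of
 * one random block and confirm that verification reports it.
 *
 *	uint32_t bad_block;
 *
 *	spdk_dif_inject_error(&iov, 1, 4, &ctx, SPDK_DIF_GUARD_ERROR, &bad_block);
 *	rc = spdk_dif_verify(&iov, 1, 4, &ctx, &err_blk);
 *	rc == -1 and err_blk.err_type == SPDK_DIF_GUARD_ERROR are expected.
 */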
int
spdk_dif_inject_error(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		      const struct spdk_dif_ctx *ctx, uint32_t inject_flags,
		      uint32_t *inject_offset)
{
	int rc;

	if (!_are_iovs_valid(iovs, iovcnt, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
		rc = dif_inject_error(iovs, iovcnt, ctx->block_size, num_blocks,
				      ctx->guard_interval + offsetof(struct spdk_dif, ref_tag),
				      _member_size(struct spdk_dif, ref_tag),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
		rc = dif_inject_error(iovs, iovcnt, ctx->block_size, num_blocks,
				      ctx->guard_interval + offsetof(struct spdk_dif, app_tag),
				      _member_size(struct spdk_dif, app_tag),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
		rc = dif_inject_error(iovs, iovcnt, ctx->block_size, num_blocks,
				      ctx->guard_interval,
				      _member_size(struct spdk_dif, guard),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Guard.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_DATA_ERROR) {
		/* If the DIF information is contained within the last 8 bytes of
		 * metadata, then the CRC covers all metadata bytes up to but excluding
		 * the last 8 bytes. But error injection does not cover these metadata
		 * bytes because their classification is not determined yet.
		 *
		 * Note: Error injection to the data block is expected to be detected
		 * as a guard error.
		 */
		rc = dif_inject_error(iovs, iovcnt, ctx->block_size, num_blocks,
				      0,
				      ctx->block_size - ctx->md_size,
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to data block.\n");
			return rc;
		}
	}

	return 0;
}
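
/* The DIX functions below are the separate-metadata counterparts of the DIF
 * functions above: the data blocks stay in iovs while the DIF is generated
 * into, or verified from, a dedicated metadata buffer (md_iov). The Guard
 * CRC covers the whole data block plus the metadata bytes that precede the
 * DIF field.
 */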
static void
dix_generate(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
	     uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	struct _iov_iter data_iter, md_iter;
	uint32_t offset_blocks;
	uint16_t guard;
	void *data_buf, *md_buf;

	offset_blocks = 0;
	_iov_iter_init(&data_iter, iovs, iovcnt);
	_iov_iter_init(&md_iter, md_iov, 1);

	while (offset_blocks < num_blocks) {

		_iov_iter_get_buf(&data_iter, &data_buf, NULL);
		_iov_iter_get_buf(&md_iter, &md_buf, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(ctx->guard_seed, data_buf, ctx->block_size);
			guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
		}

		_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);

		_iov_iter_advance(&data_iter, ctx->block_size);
		_iov_iter_advance(&md_iter, ctx->md_size);
		offset_blocks++;
	}
}

static void
_dix_generate_split(struct _iov_iter *data_iter, struct _iov_iter *md_iter,
		    uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_in_block, data_buf_len;
	uint16_t guard = 0;
	void *data_buf, *md_buf;

	_iov_iter_get_buf(md_iter, &md_buf, NULL);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < ctx->block_size) {
		_iov_iter_get_buf(data_iter, &data_buf, &data_buf_len);
		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(guard, data_buf, data_buf_len);
		}

		_iov_iter_advance(data_iter, data_buf_len);
		offset_in_block += data_buf_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
	}

	_iov_iter_advance(md_iter, ctx->md_size);

	_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);
}

static void
dix_generate_split(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		   uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	struct _iov_iter data_iter, md_iter;
	uint32_t offset_blocks;

	offset_blocks = 0;
	_iov_iter_init(&data_iter, iovs, iovcnt);
	_iov_iter_init(&md_iter, md_iov, 1);

	while (offset_blocks < num_blocks) {
		_dix_generate_split(&data_iter, &md_iter, offset_blocks, ctx);
		offset_blocks++;
	}
}

int
spdk_dix_generate(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	if (!_are_iovs_valid(iovs, iovcnt, ctx->block_size * num_blocks) ||
	    !_are_iovs_valid(md_iov, 1, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_are_iovs_bytes_multiple(iovs, iovcnt, ctx->block_size)) {
		dix_generate(iovs, iovcnt, md_iov, num_blocks, ctx);
	} else {
		dix_generate_split(iovs, iovcnt, md_iov, num_blocks, ctx);
	}

	return 0;
}

static int
dix_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
	   uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
	   struct spdk_dif_error *err_blk)
{
	struct _iov_iter data_iter, md_iter;
	uint32_t offset_blocks;
	uint16_t guard;
	void *data_buf, *md_buf;
	int rc;

	offset_blocks = 0;
	_iov_iter_init(&data_iter, iovs, iovcnt);
	_iov_iter_init(&md_iter, md_iov, 1);

	while (offset_blocks < num_blocks) {

		_iov_iter_get_buf(&data_iter, &data_buf, NULL);
		_iov_iter_get_buf(&md_iter, &md_buf, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(ctx->guard_seed, data_buf, ctx->block_size);
			guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
		}

		rc = _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_iov_iter_advance(&data_iter, ctx->block_size);
		_iov_iter_advance(&md_iter, ctx->md_size);
		offset_blocks++;
	}

	return 0;
}

static int
_dix_verify_split(struct _iov_iter *data_iter, struct _iov_iter *md_iter,
		  uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
		  struct spdk_dif_error *err_blk)
{
	uint32_t offset_in_block, data_buf_len;
	uint16_t guard = 0;
	void *data_buf, *md_buf;

	_iov_iter_get_buf(md_iter, &md_buf, NULL);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < ctx->block_size) {
		_iov_iter_get_buf(data_iter, &data_buf, &data_buf_len);
		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = spdk_crc16_t10dif(guard, data_buf, data_buf_len);
		}

		_iov_iter_advance(data_iter, data_buf_len);
		offset_in_block += data_buf_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
	}

	_iov_iter_advance(md_iter, ctx->md_size);

	return _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
}

static int
dix_verify_split(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		 uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		 struct spdk_dif_error *err_blk)
{
	struct _iov_iter data_iter, md_iter;
	uint32_t offset_blocks;
	int rc;

	offset_blocks = 0;
	_iov_iter_init(&data_iter, iovs, iovcnt);
	_iov_iter_init(&md_iter, md_iov, 1);

	while (offset_blocks < num_blocks) {
		rc = _dix_verify_split(&data_iter, &md_iter, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
		offset_blocks++;
	}

	return 0;
}

int
spdk_dix_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		struct spdk_dif_error *err_blk)
{
	if (!_are_iovs_valid(iovs, iovcnt, ctx->block_size * num_blocks) ||
	    !_are_iovs_valid(md_iov, 1, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_are_iovs_bytes_multiple(iovs, iovcnt, ctx->block_size)) {
		return dix_verify(iovs, iovcnt, md_iov, num_blocks, ctx, err_blk);
	} else {
		return dix_verify_split(iovs, iovcnt, md_iov, num_blocks, ctx, err_blk);
	}
}
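
/*
 * Illustrative usage (names are examples only; assumes ctx was initialized
 * with md_interleave = false, block_size = 512 and md_size = 8): protect
 * four 512-byte data blocks with a separate 4 * 8-byte metadata buffer.
 *
 *	struct iovec data_iov = { .iov_base = data, .iov_len = 4 * 512 };
 *	struct iovec md_iov = { .iov_base = md, .iov_len = 4 * 8 };
 *
 *	spdk_dix_generate(&data_iov, 1, &md_iov, 4, &ctx);
 *	rc = spdk_dix_verify(&data_iov, 1, &md_iov, 4, &ctx, &err_blk);
 */
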
int
spdk_dix_inject_error(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		      uint32_t inject_flags, uint32_t *inject_offset)
{
	int rc;

	if (!_are_iovs_valid(iovs, iovcnt, ctx->block_size * num_blocks) ||
	    !_are_iovs_valid(md_iov, 1, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
		rc = dif_inject_error(md_iov, 1, ctx->md_size, num_blocks,
				      ctx->guard_interval + offsetof(struct spdk_dif, ref_tag),
				      _member_size(struct spdk_dif, ref_tag),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
		rc = dif_inject_error(md_iov, 1, ctx->md_size, num_blocks,
				      ctx->guard_interval + offsetof(struct spdk_dif, app_tag),
				      _member_size(struct spdk_dif, app_tag),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
		rc = dif_inject_error(md_iov, 1, ctx->md_size, num_blocks,
				      ctx->guard_interval,
				      _member_size(struct spdk_dif, guard),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Guard.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_DATA_ERROR) {
		/* Note: Error injection to the data block is expected to be detected
		 * as a guard error.
		 */
		rc = dif_inject_error(iovs, iovcnt, ctx->block_size, num_blocks,
				      0,
				      ctx->block_size,
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to data block.\n");
			return rc;
		}
	}

	return 0;
}
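
/* Populate iovs so that they map only the data portions of the interleaved
 * buffer 'buf', skipping the metadata of every block. Worked example: with
 * block_size = 520 and md_size = 8 (so data_block_size = 512) and
 * data_offset = 700, offset_blocks is 1 and head_unalign is 188, so the
 * first iovec points at buf + 520 + 188 and is 512 - 188 = 324 bytes long;
 * each subsequent iovec covers the 512 data bytes of one block.
 */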
int
spdk_dif_set_md_interleave_iovs(struct iovec *iovs, int num_iovs,
				uint8_t *buf, uint32_t buf_len,
				uint32_t data_offset, uint32_t data_len,
				uint32_t *_mapped_len,
				const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, head_unalign, mapped_len = 0;
	uint32_t num_blocks, offset_blocks;
	struct iovec *iov = iovs;
	int iovcnt = 0;

	if (iovs == NULL || num_iovs == 0) {
		return -EINVAL;
	}

	data_block_size = ctx->block_size - ctx->md_size;

	if ((data_len % data_block_size) != 0) {
		SPDK_ERRLOG("Data length must be a multiple of data block size\n");
		return -EINVAL;
	}

	if (data_offset >= data_len) {
		SPDK_ERRLOG("Data offset must be smaller than data length\n");
		return -ERANGE;
	}

	num_blocks = data_len / data_block_size;

	if (buf_len < num_blocks * ctx->block_size) {
		SPDK_ERRLOG("Buffer overflow will occur. Buffer size is %" PRIu32 " but"
			    " necessary size is %" PRIu32 "\n",
			    buf_len, num_blocks * ctx->block_size);
		return -ERANGE;
	}

	offset_blocks = data_offset / data_block_size;
	head_unalign = data_offset % data_block_size;

	buf += offset_blocks * ctx->block_size;

	if (head_unalign != 0) {
		buf += head_unalign;

		iov->iov_base = buf;
		iov->iov_len = data_block_size - head_unalign;
		mapped_len += data_block_size - head_unalign;
		iov++;
		iovcnt++;

		buf += ctx->block_size - head_unalign;
		offset_blocks++;
	}

	while (offset_blocks < num_blocks && iovcnt < num_iovs) {
		iov->iov_base = buf;
		iov->iov_len = data_block_size;
		mapped_len += data_block_size;
		iov++;
		iovcnt++;

		buf += ctx->block_size;
		offset_blocks++;
	}

	if (_mapped_len != NULL) {
		*_mapped_len = mapped_len;
	}

	return iovcnt;
}