1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2022 Intel Corporation. 3 * All rights reserved. 4 */ 5 6 #include "spdk/dif.h" 7 #include "spdk/crc16.h" 8 #include "spdk/crc32.h" 9 #include "spdk/crc64.h" 10 #include "spdk/endian.h" 11 #include "spdk/log.h" 12 #include "spdk/util.h" 13 14 #define REFTAG_MASK_16 0x00000000FFFFFFFF 15 #define REFTAG_MASK_32 0xFFFFFFFFFFFFFFFF 16 #define REFTAG_MASK_64 0x0000FFFFFFFFFFFF 17 18 /* The variable size Storage Tag and Reference Tag is not supported yet, 19 * so the maximum size of the Reference Tag is assumed. 20 */ 21 struct spdk_dif { 22 union { 23 struct { 24 uint16_t guard; 25 uint16_t app_tag; 26 uint32_t stor_ref_space; 27 } g16; 28 struct { 29 uint32_t guard; 30 uint16_t app_tag; 31 uint16_t stor_ref_space_p1; 32 uint64_t stor_ref_space_p2; 33 } g32; 34 struct { 35 uint64_t guard; 36 uint16_t app_tag; 37 uint16_t stor_ref_space_p1; 38 uint32_t stor_ref_space_p2; 39 } g64; 40 }; 41 }; 42 SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g16) == 8, "Incorrect size"); 43 SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g32) == 16, "Incorrect size"); 44 SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g64) == 16, "Incorrect size"); 45 46 /* Context to iterate or create a iovec array. 47 * Each sgl is either iterated or created at a time. 48 */ 49 struct _dif_sgl { 50 /* Current iovec in the iteration or creation */ 51 struct iovec *iov; 52 53 /* Remaining count of iovecs in the iteration or creation. */ 54 int iovcnt; 55 56 /* Current offset in the iovec */ 57 uint32_t iov_offset; 58 59 /* Size of the created iovec array in bytes */ 60 uint32_t total_size; 61 }; 62 63 static inline void 64 _dif_sgl_init(struct _dif_sgl *s, struct iovec *iovs, int iovcnt) 65 { 66 s->iov = iovs; 67 s->iovcnt = iovcnt; 68 s->iov_offset = 0; 69 s->total_size = 0; 70 } 71 72 static void 73 _dif_sgl_advance(struct _dif_sgl *s, uint32_t step) 74 { 75 s->iov_offset += step; 76 while (s->iovcnt != 0) { 77 if (s->iov_offset < s->iov->iov_len) { 78 break; 79 } 80 81 s->iov_offset -= s->iov->iov_len; 82 s->iov++; 83 s->iovcnt--; 84 } 85 } 86 87 static inline void 88 _dif_sgl_get_buf(struct _dif_sgl *s, uint8_t **_buf, uint32_t *_buf_len) 89 { 90 if (_buf != NULL) { 91 *_buf = (uint8_t *)s->iov->iov_base + s->iov_offset; 92 } 93 if (_buf_len != NULL) { 94 *_buf_len = s->iov->iov_len - s->iov_offset; 95 } 96 } 97 98 static inline bool 99 _dif_sgl_append(struct _dif_sgl *s, uint8_t *data, uint32_t data_len) 100 { 101 assert(s->iovcnt > 0); 102 s->iov->iov_base = data; 103 s->iov->iov_len = data_len; 104 s->total_size += data_len; 105 s->iov++; 106 s->iovcnt--; 107 108 if (s->iovcnt > 0) { 109 return true; 110 } else { 111 return false; 112 } 113 } 114 115 static inline bool 116 _dif_sgl_append_split(struct _dif_sgl *dst, struct _dif_sgl *src, uint32_t data_len) 117 { 118 uint8_t *buf; 119 uint32_t buf_len; 120 121 while (data_len != 0) { 122 _dif_sgl_get_buf(src, &buf, &buf_len); 123 buf_len = spdk_min(buf_len, data_len); 124 125 if (!_dif_sgl_append(dst, buf, buf_len)) { 126 return false; 127 } 128 129 _dif_sgl_advance(src, buf_len); 130 data_len -= buf_len; 131 } 132 133 return true; 134 } 135 136 /* This function must be used before starting iteration. */ 137 static bool 138 _dif_sgl_is_bytes_multiple(struct _dif_sgl *s, uint32_t bytes) 139 { 140 int i; 141 142 for (i = 0; i < s->iovcnt; i++) { 143 if (s->iov[i].iov_len % bytes) { 144 return false; 145 } 146 } 147 148 return true; 149 } 150 151 static bool 152 _dif_sgl_is_valid_block_aligned(struct _dif_sgl *s, uint32_t num_blocks, uint32_t block_size) 153 { 154 uint32_t count = 0; 155 int i; 156 157 for (i = 0; i < s->iovcnt; i++) { 158 if (s->iov[i].iov_len % block_size) { 159 return false; 160 } 161 count += s->iov[i].iov_len / block_size; 162 } 163 164 return count >= num_blocks; 165 } 166 167 /* This function must be used before starting iteration. */ 168 static bool 169 _dif_sgl_is_valid(struct _dif_sgl *s, uint32_t bytes) 170 { 171 uint64_t total = 0; 172 int i; 173 174 for (i = 0; i < s->iovcnt; i++) { 175 total += s->iov[i].iov_len; 176 } 177 178 return total >= bytes; 179 } 180 181 static void 182 _dif_sgl_copy(struct _dif_sgl *to, struct _dif_sgl *from) 183 { 184 memcpy(to, from, sizeof(struct _dif_sgl)); 185 } 186 187 static bool 188 _dif_is_disabled(enum spdk_dif_type dif_type) 189 { 190 if (dif_type == SPDK_DIF_DISABLE) { 191 return true; 192 } else { 193 return false; 194 } 195 } 196 197 static inline size_t 198 _dif_size(enum spdk_dif_pi_format dif_pi_format) 199 { 200 uint8_t size; 201 202 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 203 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16); 204 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 205 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32); 206 } else { 207 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64); 208 } 209 210 return size; 211 } 212 213 static uint32_t 214 _get_guard_interval(uint32_t block_size, uint32_t md_size, bool dif_loc, bool md_interleave, 215 size_t dif_size) 216 { 217 if (!dif_loc) { 218 /* For metadata formats with more than 8/16 bytes (depending on 219 * the PI format), if the DIF is contained in the last 8/16 bytes 220 * of metadata, then the CRC covers all metadata up to but excluding 221 * these last 8/16 bytes. 222 */ 223 if (md_interleave) { 224 return block_size - dif_size; 225 } else { 226 return md_size - dif_size; 227 } 228 } else { 229 /* For metadata formats with more than 8/16 bytes (depending on 230 * the PI format), if the DIF is contained in the first 8/16 bytes 231 * of metadata, then the CRC does not cover any metadata. 232 */ 233 if (md_interleave) { 234 return block_size - md_size; 235 } else { 236 return 0; 237 } 238 } 239 } 240 241 static inline uint8_t 242 _dif_guard_size(enum spdk_dif_pi_format dif_pi_format) 243 { 244 uint8_t size; 245 246 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 247 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.guard); 248 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 249 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.guard); 250 } else { 251 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.guard); 252 } 253 254 return size; 255 } 256 257 static inline void 258 _dif_set_guard(struct spdk_dif *dif, uint64_t guard, enum spdk_dif_pi_format dif_pi_format) 259 { 260 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 261 to_be16(&(dif->g16.guard), (uint16_t)guard); 262 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 263 to_be32(&(dif->g32.guard), (uint32_t)guard); 264 } else { 265 to_be64(&(dif->g64.guard), guard); 266 } 267 } 268 269 static inline uint64_t 270 _dif_get_guard(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 271 { 272 uint64_t guard; 273 274 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 275 guard = (uint64_t)from_be16(&(dif->g16.guard)); 276 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 277 guard = (uint64_t)from_be32(&(dif->g32.guard)); 278 } else { 279 guard = from_be64(&(dif->g64.guard)); 280 } 281 282 return guard; 283 } 284 285 static inline uint64_t 286 _dif_generate_guard(uint64_t guard_seed, void *buf, size_t buf_len, 287 enum spdk_dif_pi_format dif_pi_format) 288 { 289 uint64_t guard; 290 291 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 292 guard = (uint64_t)spdk_crc16_t10dif((uint16_t)guard_seed, buf, buf_len); 293 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 294 guard = (uint64_t)spdk_crc32c_nvme(buf, buf_len, guard_seed); 295 } else { 296 guard = spdk_crc64_nvme(buf, buf_len, guard_seed); 297 } 298 299 return guard; 300 } 301 302 static uint64_t 303 dif_generate_guard_split(uint64_t guard_seed, struct _dif_sgl *sgl, uint32_t start, 304 uint32_t len, const struct spdk_dif_ctx *ctx) 305 { 306 uint64_t guard = guard_seed; 307 uint32_t offset, end, buf_len; 308 uint8_t *buf; 309 310 offset = start; 311 end = start + spdk_min(len, ctx->guard_interval - start); 312 313 while (offset < end) { 314 _dif_sgl_get_buf(sgl, &buf, &buf_len); 315 buf_len = spdk_min(buf_len, end - offset); 316 317 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 318 guard = _dif_generate_guard(guard, buf, buf_len, ctx->dif_pi_format); 319 } 320 321 _dif_sgl_advance(sgl, buf_len); 322 offset += buf_len; 323 } 324 325 return guard; 326 } 327 328 static inline uint64_t 329 _dif_generate_guard_copy(uint64_t guard_seed, void *dst, void *src, size_t buf_len, 330 enum spdk_dif_pi_format dif_pi_format) 331 { 332 uint64_t guard; 333 334 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 335 guard = (uint64_t)spdk_crc16_t10dif_copy((uint16_t)guard_seed, dst, src, buf_len); 336 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 337 memcpy(dst, src, buf_len); 338 guard = (uint64_t)spdk_crc32c_nvme(src, buf_len, guard_seed); 339 } else { 340 memcpy(dst, src, buf_len); 341 guard = spdk_crc64_nvme(src, buf_len, guard_seed); 342 } 343 344 return guard; 345 } 346 347 static uint64_t 348 _dif_generate_guard_copy_split_src(uint64_t guard, uint8_t *dst, 349 struct _dif_sgl *src_sgl, uint32_t data_len, 350 enum spdk_dif_pi_format dif_pi_format) 351 { 352 uint32_t offset = 0, src_len; 353 uint8_t *src; 354 355 while (offset < data_len) { 356 _dif_sgl_get_buf(src_sgl, &src, &src_len); 357 src_len = spdk_min(src_len, data_len - offset); 358 359 guard = _dif_generate_guard_copy(guard, dst + offset, src, src_len, dif_pi_format); 360 361 _dif_sgl_advance(src_sgl, src_len); 362 offset += src_len; 363 } 364 365 return guard; 366 } 367 368 static uint64_t 369 _dif_generate_guard_copy_split_dst(uint64_t guard, uint8_t *src, 370 struct _dif_sgl *dst_sgl, uint32_t data_len, 371 enum spdk_dif_pi_format dif_pi_format) 372 { 373 uint32_t offset = 0, dst_len; 374 uint8_t *dst; 375 376 while (offset < data_len) { 377 _dif_sgl_get_buf(dst_sgl, &dst, &dst_len); 378 dst_len = spdk_min(dst_len, data_len - offset); 379 380 guard = _dif_generate_guard_copy(guard, dst, src + offset, dst_len, dif_pi_format); 381 382 _dif_sgl_advance(dst_sgl, dst_len); 383 offset += dst_len; 384 } 385 386 return guard; 387 } 388 389 static void 390 _data_copy_split_src(uint8_t *dst, struct _dif_sgl *src_sgl, uint32_t data_len) 391 { 392 uint32_t offset = 0, src_len; 393 uint8_t *src; 394 395 while (offset < data_len) { 396 _dif_sgl_get_buf(src_sgl, &src, &src_len); 397 src_len = spdk_min(src_len, data_len - offset); 398 399 memcpy(dst + offset, src, src_len); 400 401 _dif_sgl_advance(src_sgl, src_len); 402 offset += src_len; 403 } 404 } 405 406 static void 407 _data_copy_split_dst(uint8_t *src, struct _dif_sgl *dst_sgl, uint32_t data_len) 408 { 409 uint32_t offset = 0, dst_len; 410 uint8_t *dst; 411 412 while (offset < data_len) { 413 _dif_sgl_get_buf(dst_sgl, &dst, &dst_len); 414 dst_len = spdk_min(dst_len, data_len - offset); 415 416 memcpy(dst, src + offset, dst_len); 417 418 _dif_sgl_advance(dst_sgl, dst_len); 419 offset += dst_len; 420 } 421 } 422 423 static inline uint8_t 424 _dif_apptag_offset(enum spdk_dif_pi_format dif_pi_format) 425 { 426 return _dif_guard_size(dif_pi_format); 427 } 428 429 static inline uint8_t 430 _dif_apptag_size(void) 431 { 432 return SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.app_tag); 433 } 434 435 static inline void 436 _dif_set_apptag(struct spdk_dif *dif, uint16_t app_tag, enum spdk_dif_pi_format dif_pi_format) 437 { 438 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 439 to_be16(&(dif->g16.app_tag), app_tag); 440 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 441 to_be16(&(dif->g32.app_tag), app_tag); 442 } else { 443 to_be16(&(dif->g64.app_tag), app_tag); 444 } 445 } 446 447 static inline uint16_t 448 _dif_get_apptag(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 449 { 450 uint16_t app_tag; 451 452 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 453 app_tag = from_be16(&(dif->g16.app_tag)); 454 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 455 app_tag = from_be16(&(dif->g32.app_tag)); 456 } else { 457 app_tag = from_be16(&(dif->g64.app_tag)); 458 } 459 460 return app_tag; 461 } 462 463 static inline bool 464 _dif_apptag_ignore(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 465 { 466 return _dif_get_apptag(dif, dif_pi_format) == SPDK_DIF_APPTAG_IGNORE; 467 } 468 469 static inline uint8_t 470 _dif_reftag_offset(enum spdk_dif_pi_format dif_pi_format) 471 { 472 uint8_t offset; 473 474 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 475 offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size(); 476 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 477 offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size() 478 + SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.stor_ref_space_p1); 479 } else { 480 offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size(); 481 } 482 483 return offset; 484 } 485 486 static inline uint8_t 487 _dif_reftag_size(enum spdk_dif_pi_format dif_pi_format) 488 { 489 uint8_t size; 490 491 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 492 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.stor_ref_space); 493 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 494 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.stor_ref_space_p2); 495 } else { 496 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.stor_ref_space_p1) + 497 SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.stor_ref_space_p2); 498 } 499 500 return size; 501 } 502 503 static inline void 504 _dif_set_reftag(struct spdk_dif *dif, uint64_t ref_tag, enum spdk_dif_pi_format dif_pi_format) 505 { 506 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 507 to_be32(&(dif->g16.stor_ref_space), (uint32_t)ref_tag); 508 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 509 to_be64(&(dif->g32.stor_ref_space_p2), ref_tag); 510 } else { 511 to_be16(&(dif->g64.stor_ref_space_p1), (uint16_t)(ref_tag >> 32)); 512 to_be32(&(dif->g64.stor_ref_space_p2), (uint32_t)ref_tag); 513 } 514 } 515 516 static inline uint64_t 517 _dif_get_reftag(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 518 { 519 uint64_t ref_tag; 520 521 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 522 ref_tag = (uint64_t)from_be32(&(dif->g16.stor_ref_space)); 523 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 524 ref_tag = from_be64(&(dif->g32.stor_ref_space_p2)); 525 } else { 526 ref_tag = (uint64_t)from_be16(&(dif->g64.stor_ref_space_p1)); 527 ref_tag <<= 32; 528 ref_tag |= (uint64_t)from_be32(&(dif->g64.stor_ref_space_p2)); 529 } 530 531 return ref_tag; 532 } 533 534 static inline bool 535 _dif_reftag_match(struct spdk_dif *dif, uint64_t ref_tag, 536 enum spdk_dif_pi_format dif_pi_format) 537 { 538 uint64_t _ref_tag; 539 bool match; 540 541 _ref_tag = _dif_get_reftag(dif, dif_pi_format); 542 543 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 544 match = (_ref_tag == (ref_tag & REFTAG_MASK_16)); 545 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 546 match = (_ref_tag == ref_tag); 547 } else { 548 match = (_ref_tag == (ref_tag & REFTAG_MASK_64)); 549 } 550 551 return match; 552 } 553 554 static inline bool 555 _dif_reftag_ignore(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 556 { 557 return _dif_reftag_match(dif, REFTAG_MASK_32, dif_pi_format); 558 } 559 560 static bool 561 _dif_ignore(struct spdk_dif *dif, const struct spdk_dif_ctx *ctx) 562 { 563 switch (ctx->dif_type) { 564 case SPDK_DIF_TYPE1: 565 case SPDK_DIF_TYPE2: 566 /* If Type 1 or 2 is used, then all DIF checks are disabled when 567 * the Application Tag is 0xFFFF. 568 */ 569 if (_dif_apptag_ignore(dif, ctx->dif_pi_format)) { 570 return true; 571 } 572 break; 573 case SPDK_DIF_TYPE3: 574 /* If Type 3 is used, then all DIF checks are disabled when the 575 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF 576 * or 0xFFFFFFFFFFFFFFFF depending on the PI format. 577 */ 578 579 if (_dif_apptag_ignore(dif, ctx->dif_pi_format) && 580 _dif_reftag_ignore(dif, ctx->dif_pi_format)) { 581 return true; 582 } 583 break; 584 default: 585 break; 586 } 587 588 return false; 589 } 590 591 static bool 592 _dif_pi_format_is_valid(enum spdk_dif_pi_format dif_pi_format) 593 { 594 switch (dif_pi_format) { 595 case SPDK_DIF_PI_FORMAT_16: 596 case SPDK_DIF_PI_FORMAT_32: 597 case SPDK_DIF_PI_FORMAT_64: 598 return true; 599 default: 600 return false; 601 } 602 } 603 604 static bool 605 _dif_type_is_valid(enum spdk_dif_type dif_type) 606 { 607 switch (dif_type) { 608 case SPDK_DIF_DISABLE: 609 case SPDK_DIF_TYPE1: 610 case SPDK_DIF_TYPE2: 611 case SPDK_DIF_TYPE3: 612 return true; 613 default: 614 return false; 615 } 616 } 617 618 int 619 spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size, 620 bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags, 621 uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag, 622 uint32_t data_offset, uint64_t guard_seed, struct spdk_dif_ctx_init_ext_opts *opts) 623 { 624 uint32_t data_block_size; 625 enum spdk_dif_pi_format dif_pi_format = SPDK_DIF_PI_FORMAT_16; 626 627 if (opts != NULL) { 628 if (!_dif_pi_format_is_valid(opts->dif_pi_format)) { 629 SPDK_ERRLOG("No valid DIF PI format provided.\n"); 630 return -EINVAL; 631 } 632 633 dif_pi_format = opts->dif_pi_format; 634 } 635 636 if (!_dif_type_is_valid(dif_type)) { 637 SPDK_ERRLOG("No valid DIF type was provided.\n"); 638 return -EINVAL; 639 } 640 641 if (md_size < _dif_size(dif_pi_format)) { 642 SPDK_ERRLOG("Metadata size is smaller than DIF size.\n"); 643 return -EINVAL; 644 } 645 646 if (md_interleave) { 647 if (block_size < md_size) { 648 SPDK_ERRLOG("Block size is smaller than DIF size.\n"); 649 return -EINVAL; 650 } 651 data_block_size = block_size - md_size; 652 } else { 653 data_block_size = block_size; 654 } 655 656 if (data_block_size == 0) { 657 SPDK_ERRLOG("Zero data block size is not allowed\n"); 658 return -EINVAL; 659 } 660 661 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 662 if ((data_block_size % 512) != 0) { 663 SPDK_ERRLOG("Data block size should be a multiple of 512B\n"); 664 return -EINVAL; 665 } 666 } else { 667 if ((data_block_size % 4096) != 0) { 668 SPDK_ERRLOG("Data block size should be a multiple of 4kB\n"); 669 return -EINVAL; 670 } 671 } 672 673 ctx->block_size = block_size; 674 ctx->md_size = md_size; 675 ctx->md_interleave = md_interleave; 676 ctx->dif_pi_format = dif_pi_format; 677 ctx->guard_interval = _get_guard_interval(block_size, md_size, dif_loc, md_interleave, 678 _dif_size(ctx->dif_pi_format)); 679 ctx->dif_type = dif_type; 680 ctx->dif_flags = dif_flags; 681 ctx->init_ref_tag = init_ref_tag; 682 ctx->apptag_mask = apptag_mask; 683 ctx->app_tag = app_tag; 684 ctx->data_offset = data_offset; 685 ctx->ref_tag_offset = data_offset / data_block_size; 686 ctx->last_guard = guard_seed; 687 ctx->guard_seed = guard_seed; 688 ctx->remapped_init_ref_tag = 0; 689 690 return 0; 691 } 692 693 void 694 spdk_dif_ctx_set_data_offset(struct spdk_dif_ctx *ctx, uint32_t data_offset) 695 { 696 uint32_t data_block_size; 697 698 if (ctx->md_interleave) { 699 data_block_size = ctx->block_size - ctx->md_size; 700 } else { 701 data_block_size = ctx->block_size; 702 } 703 704 ctx->data_offset = data_offset; 705 ctx->ref_tag_offset = data_offset / data_block_size; 706 } 707 708 void 709 spdk_dif_ctx_set_remapped_init_ref_tag(struct spdk_dif_ctx *ctx, 710 uint32_t remapped_init_ref_tag) 711 { 712 ctx->remapped_init_ref_tag = remapped_init_ref_tag; 713 } 714 715 static void 716 _dif_generate(void *_dif, uint64_t guard, uint32_t offset_blocks, 717 const struct spdk_dif_ctx *ctx) 718 { 719 struct spdk_dif *dif = _dif; 720 uint64_t ref_tag; 721 722 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 723 _dif_set_guard(dif, guard, ctx->dif_pi_format); 724 } 725 726 if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) { 727 _dif_set_apptag(dif, ctx->app_tag, ctx->dif_pi_format); 728 } 729 730 if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) { 731 /* For type 1 and 2, the reference tag is incremented for each 732 * subsequent logical block. For type 3, the reference tag 733 * remains the same as the initial reference tag. 734 */ 735 if (ctx->dif_type != SPDK_DIF_TYPE3) { 736 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks; 737 } else { 738 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset; 739 } 740 741 /* Overwrite reference tag if initialization reference tag is SPDK_DIF_REFTAG_IGNORE */ 742 if (ctx->init_ref_tag == SPDK_DIF_REFTAG_IGNORE) { 743 if (ctx->dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 744 ref_tag = REFTAG_MASK_16; 745 } else if (ctx->dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 746 ref_tag = REFTAG_MASK_32; 747 } else { 748 ref_tag = REFTAG_MASK_64; 749 } 750 } 751 752 _dif_set_reftag(dif, ref_tag, ctx->dif_pi_format); 753 } 754 } 755 756 static void 757 dif_generate(struct _dif_sgl *sgl, uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 758 { 759 uint32_t offset_blocks; 760 uint8_t *buf; 761 uint64_t guard = 0; 762 763 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 764 _dif_sgl_get_buf(sgl, &buf, NULL); 765 766 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 767 guard = _dif_generate_guard(ctx->guard_seed, buf, ctx->guard_interval, ctx->dif_pi_format); 768 } 769 770 _dif_generate(buf + ctx->guard_interval, guard, offset_blocks, ctx); 771 772 _dif_sgl_advance(sgl, ctx->block_size); 773 } 774 } 775 776 static void 777 dif_store_split(struct _dif_sgl *sgl, struct spdk_dif *dif, 778 const struct spdk_dif_ctx *ctx) 779 { 780 uint32_t offset = 0, rest_md_len, buf_len; 781 uint8_t *buf; 782 783 rest_md_len = ctx->block_size - ctx->guard_interval; 784 785 while (offset < rest_md_len) { 786 _dif_sgl_get_buf(sgl, &buf, &buf_len); 787 788 if (offset < _dif_size(ctx->dif_pi_format)) { 789 buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset); 790 memcpy(buf, (uint8_t *)dif + offset, buf_len); 791 } else { 792 buf_len = spdk_min(buf_len, rest_md_len - offset); 793 } 794 795 _dif_sgl_advance(sgl, buf_len); 796 offset += buf_len; 797 } 798 } 799 800 static uint64_t 801 _dif_generate_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len, 802 uint64_t guard, uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 803 { 804 struct spdk_dif dif = {}; 805 806 assert(offset_in_block < ctx->guard_interval); 807 assert(offset_in_block + data_len < ctx->guard_interval || 808 offset_in_block + data_len == ctx->block_size); 809 810 /* Compute CRC over split logical block data. */ 811 guard = dif_generate_guard_split(guard, sgl, offset_in_block, data_len, ctx); 812 813 if (offset_in_block + data_len < ctx->guard_interval) { 814 return guard; 815 } 816 817 /* If a whole logical block data is parsed, generate DIF 818 * and save it to the temporary DIF area. 819 */ 820 _dif_generate(&dif, guard, offset_blocks, ctx); 821 822 /* Copy generated DIF field to the split DIF field, and then 823 * skip metadata field after DIF field (if any). 824 */ 825 dif_store_split(sgl, &dif, ctx); 826 827 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 828 guard = ctx->guard_seed; 829 } 830 831 return guard; 832 } 833 834 static void 835 dif_generate_split(struct _dif_sgl *sgl, uint32_t num_blocks, 836 const struct spdk_dif_ctx *ctx) 837 { 838 uint32_t offset_blocks; 839 uint64_t guard = 0; 840 841 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 842 guard = ctx->guard_seed; 843 } 844 845 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 846 _dif_generate_split(sgl, 0, ctx->block_size, guard, offset_blocks, ctx); 847 } 848 } 849 850 int 851 spdk_dif_generate(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 852 const struct spdk_dif_ctx *ctx) 853 { 854 struct _dif_sgl sgl; 855 856 _dif_sgl_init(&sgl, iovs, iovcnt); 857 858 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 859 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 860 return -EINVAL; 861 } 862 863 if (_dif_is_disabled(ctx->dif_type)) { 864 return 0; 865 } 866 867 if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) { 868 dif_generate(&sgl, num_blocks, ctx); 869 } else { 870 dif_generate_split(&sgl, num_blocks, ctx); 871 } 872 873 return 0; 874 } 875 876 static void 877 _dif_error_set(struct spdk_dif_error *err_blk, uint8_t err_type, 878 uint64_t expected, uint64_t actual, uint32_t err_offset) 879 { 880 if (err_blk) { 881 err_blk->err_type = err_type; 882 err_blk->expected = expected; 883 err_blk->actual = actual; 884 err_blk->err_offset = err_offset; 885 } 886 } 887 888 static bool 889 _dif_reftag_check(struct spdk_dif *dif, const struct spdk_dif_ctx *ctx, 890 uint64_t expected_reftag, uint32_t offset_blocks, struct spdk_dif_error *err_blk) 891 { 892 uint64_t reftag; 893 894 if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) { 895 switch (ctx->dif_type) { 896 case SPDK_DIF_TYPE1: 897 case SPDK_DIF_TYPE2: 898 /* Compare the DIF Reference Tag field to the passed Reference Tag. 899 * The passed Reference Tag will be the least significant 4 bytes 900 * or 8 bytes (depending on the PI format) 901 * of the LBA when Type 1 is used, and application specific value 902 * if Type 2 is used. 903 */ 904 if (!_dif_reftag_match(dif, expected_reftag, ctx->dif_pi_format)) { 905 reftag = _dif_get_reftag(dif, ctx->dif_pi_format); 906 _dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected_reftag, 907 reftag, offset_blocks); 908 SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu64 "," \ 909 " Expected=%lx, Actual=%lx\n", 910 expected_reftag, expected_reftag, reftag); 911 return false; 912 } 913 break; 914 case SPDK_DIF_TYPE3: 915 /* For Type 3, computed Reference Tag remains unchanged. 916 * Hence ignore the Reference Tag field. 917 */ 918 break; 919 default: 920 break; 921 } 922 } 923 924 return true; 925 } 926 927 static int 928 _dif_verify(void *_dif, uint64_t guard, uint32_t offset_blocks, 929 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 930 { 931 struct spdk_dif *dif = _dif; 932 uint64_t _guard; 933 uint16_t _app_tag; 934 uint64_t ref_tag; 935 936 if (_dif_ignore(dif, ctx)) { 937 return 0; 938 } 939 940 /* For type 1 and 2, the reference tag is incremented for each 941 * subsequent logical block. For type 3, the reference tag 942 * remains the same as the initial reference tag. 943 */ 944 if (ctx->dif_type != SPDK_DIF_TYPE3) { 945 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks; 946 } else { 947 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset; 948 } 949 950 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 951 /* Compare the DIF Guard field to the CRC computed over the logical 952 * block data. 953 */ 954 _guard = _dif_get_guard(dif, ctx->dif_pi_format); 955 if (_guard != guard) { 956 _dif_error_set(err_blk, SPDK_DIF_GUARD_ERROR, _guard, guard, 957 offset_blocks); 958 SPDK_ERRLOG("Failed to compare Guard: LBA=%" PRIu64 "," \ 959 " Expected=%lx, Actual=%lx\n", 960 ref_tag, _guard, guard); 961 return -1; 962 } 963 } 964 965 if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) { 966 /* Compare unmasked bits in the DIF Application Tag field to the 967 * passed Application Tag. 968 */ 969 _app_tag = _dif_get_apptag(dif, ctx->dif_pi_format); 970 if ((_app_tag & ctx->apptag_mask) != (ctx->app_tag & ctx->apptag_mask)) { 971 _dif_error_set(err_blk, SPDK_DIF_APPTAG_ERROR, ctx->app_tag, 972 (_app_tag & ctx->apptag_mask), offset_blocks); 973 SPDK_ERRLOG("Failed to compare App Tag: LBA=%" PRIu64 "," \ 974 " Expected=%x, Actual=%x\n", 975 ref_tag, ctx->app_tag, (_app_tag & ctx->apptag_mask)); 976 return -1; 977 } 978 } 979 980 if (!_dif_reftag_check(dif, ctx, ref_tag, offset_blocks, err_blk)) { 981 return -1; 982 } 983 984 return 0; 985 } 986 987 static int 988 dif_verify(struct _dif_sgl *sgl, uint32_t num_blocks, 989 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 990 { 991 uint32_t offset_blocks; 992 int rc; 993 uint8_t *buf; 994 uint64_t guard = 0; 995 996 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 997 _dif_sgl_get_buf(sgl, &buf, NULL); 998 999 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1000 guard = _dif_generate_guard(ctx->guard_seed, buf, ctx->guard_interval, ctx->dif_pi_format); 1001 } 1002 1003 rc = _dif_verify(buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1004 if (rc != 0) { 1005 return rc; 1006 } 1007 1008 _dif_sgl_advance(sgl, ctx->block_size); 1009 } 1010 1011 return 0; 1012 } 1013 1014 static void 1015 dif_load_split(struct _dif_sgl *sgl, struct spdk_dif *dif, 1016 const struct spdk_dif_ctx *ctx) 1017 { 1018 uint32_t offset = 0, rest_md_len, buf_len; 1019 uint8_t *buf; 1020 1021 rest_md_len = ctx->block_size - ctx->guard_interval; 1022 1023 while (offset < rest_md_len) { 1024 _dif_sgl_get_buf(sgl, &buf, &buf_len); 1025 1026 if (offset < _dif_size(ctx->dif_pi_format)) { 1027 buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset); 1028 memcpy((uint8_t *)dif + offset, buf, buf_len); 1029 } else { 1030 buf_len = spdk_min(buf_len, rest_md_len - offset); 1031 } 1032 1033 _dif_sgl_advance(sgl, buf_len); 1034 offset += buf_len; 1035 } 1036 } 1037 1038 static int 1039 _dif_verify_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len, 1040 uint64_t *_guard, uint32_t offset_blocks, 1041 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 1042 { 1043 uint64_t guard = *_guard; 1044 struct spdk_dif dif = {}; 1045 int rc; 1046 1047 assert(_guard != NULL); 1048 assert(offset_in_block < ctx->guard_interval); 1049 assert(offset_in_block + data_len < ctx->guard_interval || 1050 offset_in_block + data_len == ctx->block_size); 1051 1052 guard = dif_generate_guard_split(guard, sgl, offset_in_block, data_len, ctx); 1053 1054 if (offset_in_block + data_len < ctx->guard_interval) { 1055 *_guard = guard; 1056 return 0; 1057 } 1058 1059 dif_load_split(sgl, &dif, ctx); 1060 1061 rc = _dif_verify(&dif, guard, offset_blocks, ctx, err_blk); 1062 if (rc != 0) { 1063 return rc; 1064 } 1065 1066 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1067 guard = ctx->guard_seed; 1068 } 1069 1070 *_guard = guard; 1071 return 0; 1072 } 1073 1074 static int 1075 dif_verify_split(struct _dif_sgl *sgl, uint32_t num_blocks, 1076 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 1077 { 1078 uint32_t offset_blocks; 1079 uint64_t guard = 0; 1080 int rc; 1081 1082 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1083 guard = ctx->guard_seed; 1084 } 1085 1086 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1087 rc = _dif_verify_split(sgl, 0, ctx->block_size, &guard, offset_blocks, 1088 ctx, err_blk); 1089 if (rc != 0) { 1090 return rc; 1091 } 1092 } 1093 1094 return 0; 1095 } 1096 1097 int 1098 spdk_dif_verify(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 1099 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 1100 { 1101 struct _dif_sgl sgl; 1102 1103 _dif_sgl_init(&sgl, iovs, iovcnt); 1104 1105 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 1106 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1107 return -EINVAL; 1108 } 1109 1110 if (_dif_is_disabled(ctx->dif_type)) { 1111 return 0; 1112 } 1113 1114 if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) { 1115 return dif_verify(&sgl, num_blocks, ctx, err_blk); 1116 } else { 1117 return dif_verify_split(&sgl, num_blocks, ctx, err_blk); 1118 } 1119 } 1120 1121 static uint32_t 1122 dif_update_crc32c(struct _dif_sgl *sgl, uint32_t num_blocks, 1123 uint32_t crc32c, const struct spdk_dif_ctx *ctx) 1124 { 1125 uint32_t offset_blocks; 1126 uint8_t *buf; 1127 1128 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1129 _dif_sgl_get_buf(sgl, &buf, NULL); 1130 1131 crc32c = spdk_crc32c_update(buf, ctx->block_size - ctx->md_size, crc32c); 1132 1133 _dif_sgl_advance(sgl, ctx->block_size); 1134 } 1135 1136 return crc32c; 1137 } 1138 1139 static uint32_t 1140 _dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len, 1141 uint32_t crc32c, const struct spdk_dif_ctx *ctx) 1142 { 1143 uint32_t data_block_size, buf_len; 1144 uint8_t *buf; 1145 1146 data_block_size = ctx->block_size - ctx->md_size; 1147 1148 assert(offset_in_block + data_len <= ctx->block_size); 1149 1150 while (data_len != 0) { 1151 _dif_sgl_get_buf(sgl, &buf, &buf_len); 1152 buf_len = spdk_min(buf_len, data_len); 1153 1154 if (offset_in_block < data_block_size) { 1155 buf_len = spdk_min(buf_len, data_block_size - offset_in_block); 1156 crc32c = spdk_crc32c_update(buf, buf_len, crc32c); 1157 } 1158 1159 _dif_sgl_advance(sgl, buf_len); 1160 offset_in_block += buf_len; 1161 data_len -= buf_len; 1162 } 1163 1164 return crc32c; 1165 } 1166 1167 static uint32_t 1168 dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t num_blocks, 1169 uint32_t crc32c, const struct spdk_dif_ctx *ctx) 1170 { 1171 uint32_t offset_blocks; 1172 1173 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1174 crc32c = _dif_update_crc32c_split(sgl, 0, ctx->block_size, crc32c, ctx); 1175 } 1176 1177 return crc32c; 1178 } 1179 1180 int 1181 spdk_dif_update_crc32c(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 1182 uint32_t *_crc32c, const struct spdk_dif_ctx *ctx) 1183 { 1184 struct _dif_sgl sgl; 1185 1186 if (_crc32c == NULL) { 1187 return -EINVAL; 1188 } 1189 1190 _dif_sgl_init(&sgl, iovs, iovcnt); 1191 1192 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 1193 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1194 return -EINVAL; 1195 } 1196 1197 if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) { 1198 *_crc32c = dif_update_crc32c(&sgl, num_blocks, *_crc32c, ctx); 1199 } else { 1200 *_crc32c = dif_update_crc32c_split(&sgl, num_blocks, *_crc32c, ctx); 1201 } 1202 1203 return 0; 1204 } 1205 1206 static void 1207 _dif_generate_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1208 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 1209 { 1210 uint32_t data_block_size; 1211 uint8_t *src, *dst; 1212 uint64_t guard = 0; 1213 1214 data_block_size = ctx->block_size - ctx->md_size; 1215 1216 _dif_sgl_get_buf(src_sgl, &src, NULL); 1217 _dif_sgl_get_buf(dst_sgl, &dst, NULL); 1218 1219 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1220 guard = _dif_generate_guard_copy(ctx->guard_seed, dst, src, data_block_size, 1221 ctx->dif_pi_format); 1222 guard = _dif_generate_guard(guard, dst + data_block_size, 1223 ctx->guard_interval - data_block_size, ctx->dif_pi_format); 1224 } else { 1225 memcpy(dst, src, data_block_size); 1226 } 1227 1228 _dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx); 1229 1230 _dif_sgl_advance(src_sgl, data_block_size); 1231 _dif_sgl_advance(dst_sgl, ctx->block_size); 1232 } 1233 1234 static void 1235 dif_generate_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1236 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1237 { 1238 uint32_t offset_blocks; 1239 1240 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1241 _dif_generate_copy(src_sgl, dst_sgl, offset_blocks, ctx); 1242 } 1243 } 1244 1245 static void 1246 _dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1247 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 1248 { 1249 uint32_t data_block_size; 1250 uint8_t *dst; 1251 uint64_t guard = 0; 1252 1253 _dif_sgl_get_buf(dst_sgl, &dst, NULL); 1254 1255 data_block_size = ctx->block_size - ctx->md_size; 1256 1257 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1258 guard = _dif_generate_guard_copy_split_src(ctx->guard_seed, dst, src_sgl, 1259 data_block_size, ctx->dif_pi_format); 1260 guard = _dif_generate_guard(guard, dst + data_block_size, 1261 ctx->guard_interval - data_block_size, ctx->dif_pi_format); 1262 } else { 1263 _data_copy_split_src(dst, src_sgl, data_block_size); 1264 } 1265 1266 _dif_sgl_advance(dst_sgl, ctx->block_size); 1267 1268 _dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx); 1269 } 1270 1271 static void 1272 dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1273 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1274 { 1275 uint32_t offset_blocks; 1276 1277 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1278 _dif_generate_copy_split(src_sgl, dst_sgl, offset_blocks, ctx); 1279 } 1280 } 1281 1282 int 1283 spdk_dif_generate_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs, 1284 int bounce_iovcnt, uint32_t num_blocks, 1285 const struct spdk_dif_ctx *ctx) 1286 { 1287 struct _dif_sgl src_sgl, dst_sgl; 1288 uint32_t data_block_size; 1289 1290 _dif_sgl_init(&src_sgl, iovs, iovcnt); 1291 _dif_sgl_init(&dst_sgl, bounce_iovs, bounce_iovcnt); 1292 1293 data_block_size = ctx->block_size - ctx->md_size; 1294 1295 if (!_dif_sgl_is_valid(&src_sgl, data_block_size * num_blocks)) { 1296 SPDK_ERRLOG("Size of iovec arrays are not valid.\n"); 1297 return -EINVAL; 1298 } 1299 1300 if (!_dif_sgl_is_valid_block_aligned(&dst_sgl, num_blocks, ctx->block_size)) { 1301 SPDK_ERRLOG("Size of bounce_iovs arrays are not valid or misaligned with block_size.\n"); 1302 return -EINVAL; 1303 } 1304 1305 if (_dif_is_disabled(ctx->dif_type)) { 1306 return 0; 1307 } 1308 1309 if (_dif_sgl_is_bytes_multiple(&src_sgl, data_block_size)) { 1310 dif_generate_copy(&src_sgl, &dst_sgl, num_blocks, ctx); 1311 } else { 1312 dif_generate_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx); 1313 } 1314 1315 return 0; 1316 } 1317 1318 static int 1319 _dif_verify_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1320 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx, 1321 struct spdk_dif_error *err_blk) 1322 { 1323 uint32_t data_block_size; 1324 uint8_t *src, *dst; 1325 int rc; 1326 uint64_t guard = 0; 1327 1328 data_block_size = ctx->block_size - ctx->md_size; 1329 1330 _dif_sgl_get_buf(src_sgl, &src, NULL); 1331 _dif_sgl_get_buf(dst_sgl, &dst, NULL); 1332 1333 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1334 guard = _dif_generate_guard_copy(ctx->guard_seed, dst, src, data_block_size, 1335 ctx->dif_pi_format); 1336 guard = _dif_generate_guard(guard, src + data_block_size, 1337 ctx->guard_interval - data_block_size, ctx->dif_pi_format); 1338 } else { 1339 memcpy(dst, src, data_block_size); 1340 } 1341 1342 rc = _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1343 if (rc != 0) { 1344 return rc; 1345 } 1346 1347 _dif_sgl_advance(src_sgl, ctx->block_size); 1348 _dif_sgl_advance(dst_sgl, data_block_size); 1349 1350 return 0; 1351 } 1352 1353 static int 1354 dif_verify_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1355 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1356 struct spdk_dif_error *err_blk) 1357 { 1358 uint32_t offset_blocks; 1359 int rc; 1360 1361 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1362 rc = _dif_verify_copy(src_sgl, dst_sgl, offset_blocks, ctx, err_blk); 1363 if (rc != 0) { 1364 return rc; 1365 } 1366 } 1367 1368 return 0; 1369 } 1370 1371 static int 1372 _dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1373 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx, 1374 struct spdk_dif_error *err_blk) 1375 { 1376 uint32_t data_block_size; 1377 uint8_t *src; 1378 uint64_t guard = 0; 1379 1380 _dif_sgl_get_buf(src_sgl, &src, NULL); 1381 1382 data_block_size = ctx->block_size - ctx->md_size; 1383 1384 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1385 guard = _dif_generate_guard_copy_split_dst(ctx->guard_seed, src, dst_sgl, 1386 data_block_size, ctx->dif_pi_format); 1387 guard = _dif_generate_guard(guard, src + data_block_size, 1388 ctx->guard_interval - data_block_size, ctx->dif_pi_format); 1389 } else { 1390 _data_copy_split_dst(src, dst_sgl, data_block_size); 1391 } 1392 1393 _dif_sgl_advance(src_sgl, ctx->block_size); 1394 1395 return _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1396 } 1397 1398 static int 1399 dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1400 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1401 struct spdk_dif_error *err_blk) 1402 { 1403 uint32_t offset_blocks; 1404 int rc; 1405 1406 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1407 rc = _dif_verify_copy_split(src_sgl, dst_sgl, offset_blocks, ctx, err_blk); 1408 if (rc != 0) { 1409 return rc; 1410 } 1411 } 1412 1413 return 0; 1414 } 1415 1416 int 1417 spdk_dif_verify_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs, 1418 int bounce_iovcnt, uint32_t num_blocks, 1419 const struct spdk_dif_ctx *ctx, 1420 struct spdk_dif_error *err_blk) 1421 { 1422 struct _dif_sgl src_sgl, dst_sgl; 1423 uint32_t data_block_size; 1424 1425 _dif_sgl_init(&src_sgl, bounce_iovs, bounce_iovcnt); 1426 _dif_sgl_init(&dst_sgl, iovs, iovcnt); 1427 1428 data_block_size = ctx->block_size - ctx->md_size; 1429 1430 if (!_dif_sgl_is_valid(&dst_sgl, data_block_size * num_blocks)) { 1431 SPDK_ERRLOG("Size of iovec arrays are not valid\n"); 1432 return -EINVAL; 1433 } 1434 1435 if (!_dif_sgl_is_valid_block_aligned(&src_sgl, num_blocks, ctx->block_size)) { 1436 SPDK_ERRLOG("Size of bounce_iovs arrays are not valid or misaligned with block_size.\n"); 1437 return -EINVAL; 1438 } 1439 1440 if (_dif_is_disabled(ctx->dif_type)) { 1441 return 0; 1442 } 1443 1444 if (_dif_sgl_is_bytes_multiple(&dst_sgl, data_block_size)) { 1445 return dif_verify_copy(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk); 1446 } else { 1447 return dif_verify_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk); 1448 } 1449 } 1450 1451 static void 1452 _bit_flip(uint8_t *buf, uint32_t flip_bit) 1453 { 1454 uint8_t byte; 1455 1456 byte = *buf; 1457 byte ^= 1 << flip_bit; 1458 *buf = byte; 1459 } 1460 1461 static int 1462 _dif_inject_error(struct _dif_sgl *sgl, 1463 uint32_t block_size, uint32_t num_blocks, 1464 uint32_t inject_offset_blocks, 1465 uint32_t inject_offset_bytes, 1466 uint32_t inject_offset_bits) 1467 { 1468 uint32_t offset_in_block, buf_len; 1469 uint8_t *buf; 1470 1471 _dif_sgl_advance(sgl, block_size * inject_offset_blocks); 1472 1473 offset_in_block = 0; 1474 1475 while (offset_in_block < block_size) { 1476 _dif_sgl_get_buf(sgl, &buf, &buf_len); 1477 buf_len = spdk_min(buf_len, block_size - offset_in_block); 1478 1479 if (inject_offset_bytes >= offset_in_block && 1480 inject_offset_bytes < offset_in_block + buf_len) { 1481 buf += inject_offset_bytes - offset_in_block; 1482 _bit_flip(buf, inject_offset_bits); 1483 return 0; 1484 } 1485 1486 _dif_sgl_advance(sgl, buf_len); 1487 offset_in_block += buf_len; 1488 } 1489 1490 return -1; 1491 } 1492 1493 static int 1494 dif_inject_error(struct _dif_sgl *sgl, uint32_t block_size, uint32_t num_blocks, 1495 uint32_t start_inject_bytes, uint32_t inject_range_bytes, 1496 uint32_t *inject_offset) 1497 { 1498 uint32_t inject_offset_blocks, inject_offset_bytes, inject_offset_bits; 1499 uint32_t offset_blocks; 1500 int rc; 1501 1502 srand(time(0)); 1503 1504 inject_offset_blocks = rand() % num_blocks; 1505 inject_offset_bytes = start_inject_bytes + (rand() % inject_range_bytes); 1506 inject_offset_bits = rand() % 8; 1507 1508 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1509 if (offset_blocks == inject_offset_blocks) { 1510 rc = _dif_inject_error(sgl, block_size, num_blocks, 1511 inject_offset_blocks, 1512 inject_offset_bytes, 1513 inject_offset_bits); 1514 if (rc == 0) { 1515 *inject_offset = inject_offset_blocks; 1516 } 1517 return rc; 1518 } 1519 } 1520 1521 return -1; 1522 } 1523 1524 int 1525 spdk_dif_inject_error(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 1526 const struct spdk_dif_ctx *ctx, uint32_t inject_flags, 1527 uint32_t *inject_offset) 1528 { 1529 struct _dif_sgl sgl; 1530 int rc; 1531 1532 _dif_sgl_init(&sgl, iovs, iovcnt); 1533 1534 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 1535 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1536 return -EINVAL; 1537 } 1538 1539 if (inject_flags & SPDK_DIF_REFTAG_ERROR) { 1540 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1541 ctx->guard_interval + _dif_reftag_offset(ctx->dif_pi_format), 1542 _dif_reftag_size(ctx->dif_pi_format), 1543 inject_offset); 1544 if (rc != 0) { 1545 SPDK_ERRLOG("Failed to inject error to Reference Tag.\n"); 1546 return rc; 1547 } 1548 } 1549 1550 if (inject_flags & SPDK_DIF_APPTAG_ERROR) { 1551 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1552 ctx->guard_interval + _dif_apptag_offset(ctx->dif_pi_format), 1553 _dif_apptag_size(), 1554 inject_offset); 1555 if (rc != 0) { 1556 SPDK_ERRLOG("Failed to inject error to Application Tag.\n"); 1557 return rc; 1558 } 1559 } 1560 if (inject_flags & SPDK_DIF_GUARD_ERROR) { 1561 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1562 ctx->guard_interval, 1563 _dif_guard_size(ctx->dif_pi_format), 1564 inject_offset); 1565 if (rc != 0) { 1566 SPDK_ERRLOG("Failed to inject error to Guard.\n"); 1567 return rc; 1568 } 1569 } 1570 1571 if (inject_flags & SPDK_DIF_DATA_ERROR) { 1572 /* If the DIF information is contained within the last 8/16 bytes of 1573 * metadata (depending on the PI format), then the CRC covers all metadata 1574 * bytes up to but excluding the last 8/16 bytes. But error injection does not 1575 * cover these metadata because classification is not determined yet. 1576 * 1577 * Note: Error injection to data block is expected to be detected as 1578 * guard error. 1579 */ 1580 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1581 0, 1582 ctx->block_size - ctx->md_size, 1583 inject_offset); 1584 if (rc != 0) { 1585 SPDK_ERRLOG("Failed to inject error to data block.\n"); 1586 return rc; 1587 } 1588 } 1589 1590 return 0; 1591 } 1592 1593 static void 1594 dix_generate(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1595 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1596 { 1597 uint32_t offset_blocks = 0; 1598 uint8_t *data_buf, *md_buf; 1599 uint64_t guard; 1600 1601 while (offset_blocks < num_blocks) { 1602 _dif_sgl_get_buf(data_sgl, &data_buf, NULL); 1603 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1604 1605 guard = 0; 1606 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1607 guard = _dif_generate_guard(ctx->guard_seed, data_buf, ctx->block_size, 1608 ctx->dif_pi_format); 1609 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1610 ctx->dif_pi_format); 1611 } 1612 1613 _dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx); 1614 1615 _dif_sgl_advance(data_sgl, ctx->block_size); 1616 _dif_sgl_advance(md_sgl, ctx->md_size); 1617 offset_blocks++; 1618 } 1619 } 1620 1621 static void 1622 _dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1623 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 1624 { 1625 uint32_t offset_in_block, data_buf_len; 1626 uint8_t *data_buf, *md_buf; 1627 uint64_t guard = 0; 1628 1629 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1630 1631 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1632 guard = ctx->guard_seed; 1633 } 1634 offset_in_block = 0; 1635 1636 while (offset_in_block < ctx->block_size) { 1637 _dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len); 1638 data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block); 1639 1640 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1641 guard = _dif_generate_guard(guard, data_buf, data_buf_len, 1642 ctx->dif_pi_format); 1643 } 1644 1645 _dif_sgl_advance(data_sgl, data_buf_len); 1646 offset_in_block += data_buf_len; 1647 } 1648 1649 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1650 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1651 ctx->dif_pi_format); 1652 } 1653 1654 _dif_sgl_advance(md_sgl, ctx->md_size); 1655 1656 _dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx); 1657 } 1658 1659 static void 1660 dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1661 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1662 { 1663 uint32_t offset_blocks; 1664 1665 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1666 _dix_generate_split(data_sgl, md_sgl, offset_blocks, ctx); 1667 } 1668 } 1669 1670 int 1671 spdk_dix_generate(struct iovec *iovs, int iovcnt, struct iovec *md_iov, 1672 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1673 { 1674 struct _dif_sgl data_sgl, md_sgl; 1675 1676 _dif_sgl_init(&data_sgl, iovs, iovcnt); 1677 _dif_sgl_init(&md_sgl, md_iov, 1); 1678 1679 if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) || 1680 !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) { 1681 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1682 return -EINVAL; 1683 } 1684 1685 if (_dif_is_disabled(ctx->dif_type)) { 1686 return 0; 1687 } 1688 1689 if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) { 1690 dix_generate(&data_sgl, &md_sgl, num_blocks, ctx); 1691 } else { 1692 dix_generate_split(&data_sgl, &md_sgl, num_blocks, ctx); 1693 } 1694 1695 return 0; 1696 } 1697 1698 static int 1699 dix_verify(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1700 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1701 struct spdk_dif_error *err_blk) 1702 { 1703 uint32_t offset_blocks = 0; 1704 uint8_t *data_buf, *md_buf; 1705 uint64_t guard; 1706 int rc; 1707 1708 while (offset_blocks < num_blocks) { 1709 _dif_sgl_get_buf(data_sgl, &data_buf, NULL); 1710 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1711 1712 guard = 0; 1713 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1714 guard = _dif_generate_guard(ctx->guard_seed, data_buf, ctx->block_size, 1715 ctx->dif_pi_format); 1716 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1717 ctx->dif_pi_format); 1718 } 1719 1720 rc = _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1721 if (rc != 0) { 1722 return rc; 1723 } 1724 1725 _dif_sgl_advance(data_sgl, ctx->block_size); 1726 _dif_sgl_advance(md_sgl, ctx->md_size); 1727 offset_blocks++; 1728 } 1729 1730 return 0; 1731 } 1732 1733 static int 1734 _dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1735 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx, 1736 struct spdk_dif_error *err_blk) 1737 { 1738 uint32_t offset_in_block, data_buf_len; 1739 uint8_t *data_buf, *md_buf; 1740 uint64_t guard = 0; 1741 1742 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1743 1744 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1745 guard = ctx->guard_seed; 1746 } 1747 offset_in_block = 0; 1748 1749 while (offset_in_block < ctx->block_size) { 1750 _dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len); 1751 data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block); 1752 1753 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1754 guard = _dif_generate_guard(guard, data_buf, data_buf_len, 1755 ctx->dif_pi_format); 1756 } 1757 1758 _dif_sgl_advance(data_sgl, data_buf_len); 1759 offset_in_block += data_buf_len; 1760 } 1761 1762 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1763 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1764 ctx->dif_pi_format); 1765 } 1766 1767 _dif_sgl_advance(md_sgl, ctx->md_size); 1768 1769 return _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1770 } 1771 1772 static int 1773 dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1774 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1775 struct spdk_dif_error *err_blk) 1776 { 1777 uint32_t offset_blocks; 1778 int rc; 1779 1780 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1781 rc = _dix_verify_split(data_sgl, md_sgl, offset_blocks, ctx, err_blk); 1782 if (rc != 0) { 1783 return rc; 1784 } 1785 } 1786 1787 return 0; 1788 } 1789 1790 int 1791 spdk_dix_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov, 1792 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1793 struct spdk_dif_error *err_blk) 1794 { 1795 struct _dif_sgl data_sgl, md_sgl; 1796 1797 if (md_iov->iov_base == NULL) { 1798 SPDK_ERRLOG("Metadata buffer is NULL.\n"); 1799 return -EINVAL; 1800 } 1801 1802 _dif_sgl_init(&data_sgl, iovs, iovcnt); 1803 _dif_sgl_init(&md_sgl, md_iov, 1); 1804 1805 if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) || 1806 !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) { 1807 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1808 return -EINVAL; 1809 } 1810 1811 if (_dif_is_disabled(ctx->dif_type)) { 1812 return 0; 1813 } 1814 1815 if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) { 1816 return dix_verify(&data_sgl, &md_sgl, num_blocks, ctx, err_blk); 1817 } else { 1818 return dix_verify_split(&data_sgl, &md_sgl, num_blocks, ctx, err_blk); 1819 } 1820 } 1821 1822 int 1823 spdk_dix_inject_error(struct iovec *iovs, int iovcnt, struct iovec *md_iov, 1824 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1825 uint32_t inject_flags, uint32_t *inject_offset) 1826 { 1827 struct _dif_sgl data_sgl, md_sgl; 1828 int rc; 1829 1830 _dif_sgl_init(&data_sgl, iovs, iovcnt); 1831 _dif_sgl_init(&md_sgl, md_iov, 1); 1832 1833 if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) || 1834 !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) { 1835 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1836 return -EINVAL; 1837 } 1838 1839 if (inject_flags & SPDK_DIF_REFTAG_ERROR) { 1840 rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks, 1841 ctx->guard_interval + _dif_reftag_offset(ctx->dif_pi_format), 1842 _dif_reftag_size(ctx->dif_pi_format), 1843 inject_offset); 1844 if (rc != 0) { 1845 SPDK_ERRLOG("Failed to inject error to Reference Tag.\n"); 1846 return rc; 1847 } 1848 } 1849 1850 if (inject_flags & SPDK_DIF_APPTAG_ERROR) { 1851 rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks, 1852 ctx->guard_interval + _dif_apptag_offset(ctx->dif_pi_format), 1853 _dif_apptag_size(), 1854 inject_offset); 1855 if (rc != 0) { 1856 SPDK_ERRLOG("Failed to inject error to Application Tag.\n"); 1857 return rc; 1858 } 1859 } 1860 1861 if (inject_flags & SPDK_DIF_GUARD_ERROR) { 1862 rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks, 1863 ctx->guard_interval, 1864 _dif_guard_size(ctx->dif_pi_format), 1865 inject_offset); 1866 if (rc != 0) { 1867 SPDK_ERRLOG("Failed to inject error to Guard.\n"); 1868 return rc; 1869 } 1870 } 1871 1872 if (inject_flags & SPDK_DIF_DATA_ERROR) { 1873 /* Note: Error injection to data block is expected to be detected 1874 * as guard error. 1875 */ 1876 rc = dif_inject_error(&data_sgl, ctx->block_size, num_blocks, 1877 0, 1878 ctx->block_size, 1879 inject_offset); 1880 if (rc != 0) { 1881 SPDK_ERRLOG("Failed to inject error to Guard.\n"); 1882 return rc; 1883 } 1884 } 1885 1886 return 0; 1887 } 1888 1889 static uint32_t 1890 _to_next_boundary(uint32_t offset, uint32_t boundary) 1891 { 1892 return boundary - (offset % boundary); 1893 } 1894 1895 static uint32_t 1896 _to_size_with_md(uint32_t size, uint32_t data_block_size, uint32_t block_size) 1897 { 1898 return (size / data_block_size) * block_size + (size % data_block_size); 1899 } 1900 1901 int 1902 spdk_dif_set_md_interleave_iovs(struct iovec *iovs, int iovcnt, 1903 struct iovec *buf_iovs, int buf_iovcnt, 1904 uint32_t data_offset, uint32_t data_len, 1905 uint32_t *_mapped_len, 1906 const struct spdk_dif_ctx *ctx) 1907 { 1908 uint32_t data_block_size, data_unalign, buf_len, buf_offset, len; 1909 struct _dif_sgl dif_sgl; 1910 struct _dif_sgl buf_sgl; 1911 1912 if (iovs == NULL || iovcnt == 0 || buf_iovs == NULL || buf_iovcnt == 0) { 1913 return -EINVAL; 1914 } 1915 1916 data_block_size = ctx->block_size - ctx->md_size; 1917 1918 data_unalign = ctx->data_offset % data_block_size; 1919 1920 buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size, 1921 ctx->block_size); 1922 buf_len -= data_unalign; 1923 1924 _dif_sgl_init(&dif_sgl, iovs, iovcnt); 1925 _dif_sgl_init(&buf_sgl, buf_iovs, buf_iovcnt); 1926 1927 if (!_dif_sgl_is_valid(&buf_sgl, buf_len)) { 1928 SPDK_ERRLOG("Buffer overflow will occur.\n"); 1929 return -ERANGE; 1930 } 1931 1932 buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size); 1933 buf_offset -= data_unalign; 1934 1935 _dif_sgl_advance(&buf_sgl, buf_offset); 1936 1937 while (data_len != 0) { 1938 len = spdk_min(data_len, _to_next_boundary(ctx->data_offset + data_offset, data_block_size)); 1939 if (!_dif_sgl_append_split(&dif_sgl, &buf_sgl, len)) { 1940 break; 1941 } 1942 _dif_sgl_advance(&buf_sgl, ctx->md_size); 1943 data_offset += len; 1944 data_len -= len; 1945 } 1946 1947 if (_mapped_len != NULL) { 1948 *_mapped_len = dif_sgl.total_size; 1949 } 1950 1951 return iovcnt - dif_sgl.iovcnt; 1952 } 1953 1954 static int 1955 _dif_sgl_setup_stream(struct _dif_sgl *sgl, uint32_t *_buf_offset, uint32_t *_buf_len, 1956 uint32_t data_offset, uint32_t data_len, 1957 const struct spdk_dif_ctx *ctx) 1958 { 1959 uint32_t data_block_size, data_unalign, buf_len, buf_offset; 1960 1961 data_block_size = ctx->block_size - ctx->md_size; 1962 1963 data_unalign = ctx->data_offset % data_block_size; 1964 1965 /* If the last data block is complete, DIF of the data block is 1966 * inserted or verified in this turn. 1967 */ 1968 buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size, 1969 ctx->block_size); 1970 buf_len -= data_unalign; 1971 1972 if (!_dif_sgl_is_valid(sgl, buf_len)) { 1973 return -ERANGE; 1974 } 1975 1976 buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size); 1977 buf_offset -= data_unalign; 1978 1979 _dif_sgl_advance(sgl, buf_offset); 1980 buf_len -= buf_offset; 1981 1982 buf_offset += data_unalign; 1983 1984 *_buf_offset = buf_offset; 1985 *_buf_len = buf_len; 1986 1987 return 0; 1988 } 1989 1990 int 1991 spdk_dif_generate_stream(struct iovec *iovs, int iovcnt, 1992 uint32_t data_offset, uint32_t data_len, 1993 struct spdk_dif_ctx *ctx) 1994 { 1995 uint32_t buf_len = 0, buf_offset = 0; 1996 uint32_t len, offset_in_block, offset_blocks; 1997 uint64_t guard = 0; 1998 struct _dif_sgl sgl; 1999 int rc; 2000 2001 if (iovs == NULL || iovcnt == 0) { 2002 return -EINVAL; 2003 } 2004 2005 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2006 guard = ctx->last_guard; 2007 } 2008 2009 _dif_sgl_init(&sgl, iovs, iovcnt); 2010 2011 rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx); 2012 if (rc != 0) { 2013 return rc; 2014 } 2015 2016 while (buf_len != 0) { 2017 len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size)); 2018 offset_in_block = buf_offset % ctx->block_size; 2019 offset_blocks = buf_offset / ctx->block_size; 2020 2021 guard = _dif_generate_split(&sgl, offset_in_block, len, guard, offset_blocks, ctx); 2022 2023 buf_len -= len; 2024 buf_offset += len; 2025 } 2026 2027 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2028 ctx->last_guard = guard; 2029 } 2030 2031 return 0; 2032 } 2033 2034 int 2035 spdk_dif_verify_stream(struct iovec *iovs, int iovcnt, 2036 uint32_t data_offset, uint32_t data_len, 2037 struct spdk_dif_ctx *ctx, 2038 struct spdk_dif_error *err_blk) 2039 { 2040 uint32_t buf_len = 0, buf_offset = 0; 2041 uint32_t len, offset_in_block, offset_blocks; 2042 uint64_t guard = 0; 2043 struct _dif_sgl sgl; 2044 int rc = 0; 2045 2046 if (iovs == NULL || iovcnt == 0) { 2047 return -EINVAL; 2048 } 2049 2050 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2051 guard = ctx->last_guard; 2052 } 2053 2054 _dif_sgl_init(&sgl, iovs, iovcnt); 2055 2056 rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx); 2057 if (rc != 0) { 2058 return rc; 2059 } 2060 2061 while (buf_len != 0) { 2062 len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size)); 2063 offset_in_block = buf_offset % ctx->block_size; 2064 offset_blocks = buf_offset / ctx->block_size; 2065 2066 rc = _dif_verify_split(&sgl, offset_in_block, len, &guard, offset_blocks, 2067 ctx, err_blk); 2068 if (rc != 0) { 2069 goto error; 2070 } 2071 2072 buf_len -= len; 2073 buf_offset += len; 2074 } 2075 2076 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2077 ctx->last_guard = guard; 2078 } 2079 error: 2080 return rc; 2081 } 2082 2083 int 2084 spdk_dif_update_crc32c_stream(struct iovec *iovs, int iovcnt, 2085 uint32_t data_offset, uint32_t data_len, 2086 uint32_t *_crc32c, const struct spdk_dif_ctx *ctx) 2087 { 2088 uint32_t buf_len = 0, buf_offset = 0, len, offset_in_block; 2089 uint32_t crc32c; 2090 struct _dif_sgl sgl; 2091 int rc; 2092 2093 if (iovs == NULL || iovcnt == 0) { 2094 return -EINVAL; 2095 } 2096 2097 crc32c = *_crc32c; 2098 _dif_sgl_init(&sgl, iovs, iovcnt); 2099 2100 rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx); 2101 if (rc != 0) { 2102 return rc; 2103 } 2104 2105 while (buf_len != 0) { 2106 len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size)); 2107 offset_in_block = buf_offset % ctx->block_size; 2108 2109 crc32c = _dif_update_crc32c_split(&sgl, offset_in_block, len, crc32c, ctx); 2110 2111 buf_len -= len; 2112 buf_offset += len; 2113 } 2114 2115 *_crc32c = crc32c; 2116 2117 return 0; 2118 } 2119 2120 void 2121 spdk_dif_get_range_with_md(uint32_t data_offset, uint32_t data_len, 2122 uint32_t *_buf_offset, uint32_t *_buf_len, 2123 const struct spdk_dif_ctx *ctx) 2124 { 2125 uint32_t data_block_size, data_unalign, buf_offset, buf_len; 2126 2127 if (!ctx->md_interleave) { 2128 buf_offset = data_offset; 2129 buf_len = data_len; 2130 } else { 2131 data_block_size = ctx->block_size - ctx->md_size; 2132 2133 data_unalign = data_offset % data_block_size; 2134 2135 buf_offset = _to_size_with_md(data_offset, data_block_size, ctx->block_size); 2136 buf_len = _to_size_with_md(data_unalign + data_len, data_block_size, ctx->block_size) - 2137 data_unalign; 2138 } 2139 2140 if (_buf_offset != NULL) { 2141 *_buf_offset = buf_offset; 2142 } 2143 2144 if (_buf_len != NULL) { 2145 *_buf_len = buf_len; 2146 } 2147 } 2148 2149 uint32_t 2150 spdk_dif_get_length_with_md(uint32_t data_len, const struct spdk_dif_ctx *ctx) 2151 { 2152 uint32_t data_block_size; 2153 2154 if (!ctx->md_interleave) { 2155 return data_len; 2156 } else { 2157 data_block_size = ctx->block_size - ctx->md_size; 2158 2159 return _to_size_with_md(data_len, data_block_size, ctx->block_size); 2160 } 2161 } 2162 2163 static int 2164 _dif_remap_ref_tag(struct _dif_sgl *sgl, uint32_t offset_blocks, 2165 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk, 2166 bool check_ref_tag) 2167 { 2168 uint32_t offset, buf_len; 2169 uint64_t expected = 0, remapped; 2170 uint8_t *buf; 2171 struct _dif_sgl tmp_sgl; 2172 struct spdk_dif dif; 2173 2174 /* Fast forward to DIF field. */ 2175 _dif_sgl_advance(sgl, ctx->guard_interval); 2176 _dif_sgl_copy(&tmp_sgl, sgl); 2177 2178 /* Copy the split DIF field to the temporary DIF buffer */ 2179 offset = 0; 2180 while (offset < _dif_size(ctx->dif_pi_format)) { 2181 _dif_sgl_get_buf(sgl, &buf, &buf_len); 2182 buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset); 2183 2184 memcpy((uint8_t *)&dif + offset, buf, buf_len); 2185 2186 _dif_sgl_advance(sgl, buf_len); 2187 offset += buf_len; 2188 } 2189 2190 if (_dif_ignore(&dif, ctx)) { 2191 goto end; 2192 } 2193 2194 /* For type 1 and 2, the Reference Tag is incremented for each 2195 * subsequent logical block. For type 3, the Reference Tag 2196 * remains the same as the initial Reference Tag. 2197 */ 2198 if (ctx->dif_type != SPDK_DIF_TYPE3) { 2199 expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks; 2200 remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks; 2201 } else { 2202 remapped = ctx->remapped_init_ref_tag; 2203 } 2204 2205 /* Verify the stored Reference Tag. */ 2206 if (check_ref_tag && !_dif_reftag_check(&dif, ctx, expected, offset_blocks, err_blk)) { 2207 return -1; 2208 } 2209 2210 /* Update the stored Reference Tag to the remapped one. */ 2211 _dif_set_reftag(&dif, remapped, ctx->dif_pi_format); 2212 2213 offset = 0; 2214 while (offset < _dif_size(ctx->dif_pi_format)) { 2215 _dif_sgl_get_buf(&tmp_sgl, &buf, &buf_len); 2216 buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset); 2217 2218 memcpy(buf, (uint8_t *)&dif + offset, buf_len); 2219 2220 _dif_sgl_advance(&tmp_sgl, buf_len); 2221 offset += buf_len; 2222 } 2223 2224 end: 2225 _dif_sgl_advance(sgl, ctx->block_size - ctx->guard_interval - _dif_size(ctx->dif_pi_format)); 2226 2227 return 0; 2228 } 2229 2230 int 2231 spdk_dif_remap_ref_tag(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 2232 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk, 2233 bool check_ref_tag) 2234 { 2235 struct _dif_sgl sgl; 2236 uint32_t offset_blocks; 2237 int rc; 2238 2239 _dif_sgl_init(&sgl, iovs, iovcnt); 2240 2241 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 2242 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 2243 return -EINVAL; 2244 } 2245 2246 if (_dif_is_disabled(ctx->dif_type)) { 2247 return 0; 2248 } 2249 2250 if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) { 2251 return 0; 2252 } 2253 2254 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 2255 rc = _dif_remap_ref_tag(&sgl, offset_blocks, ctx, err_blk, check_ref_tag); 2256 if (rc != 0) { 2257 return rc; 2258 } 2259 } 2260 2261 return 0; 2262 } 2263 2264 static int 2265 _dix_remap_ref_tag(struct _dif_sgl *md_sgl, uint32_t offset_blocks, 2266 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk, 2267 bool check_ref_tag) 2268 { 2269 uint64_t expected = 0, remapped; 2270 uint8_t *md_buf; 2271 struct spdk_dif *dif; 2272 2273 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 2274 2275 dif = (struct spdk_dif *)(md_buf + ctx->guard_interval); 2276 2277 if (_dif_ignore(dif, ctx)) { 2278 goto end; 2279 } 2280 2281 /* For type 1 and 2, the Reference Tag is incremented for each 2282 * subsequent logical block. For type 3, the Reference Tag 2283 * remains the same as the initialReference Tag. 2284 */ 2285 if (ctx->dif_type != SPDK_DIF_TYPE3) { 2286 expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks; 2287 remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks; 2288 } else { 2289 remapped = ctx->remapped_init_ref_tag; 2290 } 2291 2292 /* Verify the stored Reference Tag. */ 2293 if (check_ref_tag && !_dif_reftag_check(dif, ctx, expected, offset_blocks, err_blk)) { 2294 return -1; 2295 } 2296 2297 /* Update the stored Reference Tag to the remapped one. */ 2298 _dif_set_reftag(dif, remapped, ctx->dif_pi_format); 2299 2300 end: 2301 _dif_sgl_advance(md_sgl, ctx->md_size); 2302 2303 return 0; 2304 } 2305 2306 int 2307 spdk_dix_remap_ref_tag(struct iovec *md_iov, uint32_t num_blocks, 2308 const struct spdk_dif_ctx *ctx, 2309 struct spdk_dif_error *err_blk, 2310 bool check_ref_tag) 2311 { 2312 struct _dif_sgl md_sgl; 2313 uint32_t offset_blocks; 2314 int rc; 2315 2316 _dif_sgl_init(&md_sgl, md_iov, 1); 2317 2318 if (!_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) { 2319 SPDK_ERRLOG("Size of metadata iovec array is not valid.\n"); 2320 return -EINVAL; 2321 } 2322 2323 if (_dif_is_disabled(ctx->dif_type)) { 2324 return 0; 2325 } 2326 2327 if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) { 2328 return 0; 2329 } 2330 2331 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 2332 rc = _dix_remap_ref_tag(&md_sgl, offset_blocks, ctx, err_blk, check_ref_tag); 2333 if (rc != 0) { 2334 return rc; 2335 } 2336 } 2337 2338 return 0; 2339 } 2340