/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/dif.h"
#include "spdk/crc16.h"
#include "spdk/crc32.h"
#include "spdk/crc64.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/util.h"

#define REFTAG_MASK_16 0x00000000FFFFFFFF
#define REFTAG_MASK_32 0xFFFFFFFFFFFFFFFF
#define REFTAG_MASK_64 0x0000FFFFFFFFFFFF

/* The variable size Storage Tag and Reference Tag are not supported yet,
 * so the maximum size of the Reference Tag is assumed.
 */
struct spdk_dif {
	union {
		struct {
			uint16_t guard;
			uint16_t app_tag;
			uint32_t stor_ref_space;
		} g16;
		struct {
			uint32_t guard;
			uint16_t app_tag;
			uint16_t stor_ref_space_p1;
			uint64_t stor_ref_space_p2;
		} g32;
		struct {
			uint64_t guard;
			uint16_t app_tag;
			uint16_t stor_ref_space_p1;
			uint32_t stor_ref_space_p2;
		} g64;
	};
};
SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g16) == 8, "Incorrect size");
SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g32) == 16, "Incorrect size");
SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g64) == 16, "Incorrect size");

/* Context to iterate or create an iovec array.
 * Each sgl is either iterated or created at a time.
 */
struct _dif_sgl {
	/* Current iovec in the iteration or creation */
	struct iovec *iov;

	/* Remaining count of iovecs in the iteration or creation. */
	int iovcnt;

	/* Current offset in the iovec */
	uint32_t iov_offset;

	/* Size of the created iovec array in bytes */
	uint32_t total_size;
};

static inline void
_dif_sgl_init(struct _dif_sgl *s, struct iovec *iovs, int iovcnt)
{
	s->iov = iovs;
	s->iovcnt = iovcnt;
	s->iov_offset = 0;
	s->total_size = 0;
}

static void
_dif_sgl_advance(struct _dif_sgl *s, uint32_t step)
{
	s->iov_offset += step;
	while (s->iovcnt != 0) {
		if (s->iov_offset < s->iov->iov_len) {
			break;
		}

		s->iov_offset -= s->iov->iov_len;
		s->iov++;
		s->iovcnt--;
	}
}

static inline void
_dif_sgl_get_buf(struct _dif_sgl *s, uint8_t **_buf, uint32_t *_buf_len)
{
	if (_buf != NULL) {
		*_buf = (uint8_t *)s->iov->iov_base + s->iov_offset;
	}
	if (_buf_len != NULL) {
		*_buf_len = s->iov->iov_len - s->iov_offset;
	}
}

static inline bool
_dif_sgl_append(struct _dif_sgl *s, uint8_t *data, uint32_t data_len)
{
	assert(s->iovcnt > 0);
	s->iov->iov_base = data;
	s->iov->iov_len = data_len;
	s->total_size += data_len;
	s->iov++;
	s->iovcnt--;

	if (s->iovcnt > 0) {
		return true;
	} else {
		return false;
	}
}

static inline bool
_dif_sgl_append_split(struct _dif_sgl *dst, struct _dif_sgl *src, uint32_t data_len)
{
	uint8_t *buf;
	uint32_t buf_len;

	while (data_len != 0) {
		_dif_sgl_get_buf(src, &buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);

		if (!_dif_sgl_append(dst, buf, buf_len)) {
			return false;
		}

		_dif_sgl_advance(src, buf_len);
		data_len -= buf_len;
	}

	return true;
}
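
/* Illustrative sketch (not part of the library): how the _dif_sgl helpers above
 * are typically driven.  The buffers and sizes below are hypothetical; the point
 * is that _dif_sgl_get_buf() returns the contiguous piece at the current position
 * and _dif_sgl_advance() walks across iovec boundaries transparently.
 *
 *	uint8_t part0[16], part1[16];
 *	struct iovec iovs[2] = {
 *		{ .iov_base = part0, .iov_len = sizeof(part0) },
 *		{ .iov_base = part1, .iov_len = sizeof(part1) },
 *	};
 *	struct _dif_sgl sgl;
 *	uint8_t *buf;
 *	uint32_t buf_len;
 *
 *	_dif_sgl_init(&sgl, iovs, 2);
 *	_dif_sgl_advance(&sgl, 24);
 *	_dif_sgl_get_buf(&sgl, &buf, &buf_len);	// buf == part1 + 8, buf_len == 8
 */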

/* This function must be used before starting iteration. */
static bool
_dif_sgl_is_bytes_multiple(struct _dif_sgl *s, uint32_t bytes)
{
	int i;

	for (i = 0; i < s->iovcnt; i++) {
		if (s->iov[i].iov_len % bytes) {
			return false;
		}
	}

	return true;
}

/* This function must be used before starting iteration. */
static bool
_dif_sgl_is_valid(struct _dif_sgl *s, uint32_t bytes)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < s->iovcnt; i++) {
		total += s->iov[i].iov_len;
	}

	return total >= bytes;
}

static void
_dif_sgl_copy(struct _dif_sgl *to, struct _dif_sgl *from)
{
	memcpy(to, from, sizeof(struct _dif_sgl));
}

static bool
_dif_is_disabled(enum spdk_dif_type dif_type)
{
	if (dif_type == SPDK_DIF_DISABLE) {
		return true;
	} else {
		return false;
	}
}

static inline size_t
_dif_size(enum spdk_dif_pi_format dif_pi_format)
{
	uint8_t size;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32);
	} else {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64);
	}

	return size;
}

static uint32_t
_get_guard_interval(uint32_t block_size, uint32_t md_size, bool dif_loc, bool md_interleave,
		    size_t dif_size)
{
	if (!dif_loc) {
		/* For metadata formats with more than 8/16 bytes (depending on
		 * the PI format), if the DIF is contained in the last 8/16 bytes
		 * of metadata, then the CRC covers all metadata up to but excluding
		 * these last 8/16 bytes.
		 */
		if (md_interleave) {
			return block_size - dif_size;
		} else {
			return md_size - dif_size;
		}
	} else {
		/* For metadata formats with more than 8/16 bytes (depending on
		 * the PI format), if the DIF is contained in the first 8/16 bytes
		 * of metadata, then the CRC does not cover any metadata.
		 */
		if (md_interleave) {
			return block_size - md_size;
		} else {
			return 0;
		}
	}
}
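
/* Worked example (assumed values, for illustration only): with interleaved
 * metadata, block_size = 4096 + 128, md_size = 128 and an 8-byte DIF
 * (SPDK_DIF_PI_FORMAT_16):
 *
 *   dif_loc == false: guard_interval = 4224 - 8   = 4216
 *                     (the guard covers the data and all metadata except the
 *                      trailing 8-byte DIF field)
 *   dif_loc == true:  guard_interval = 4224 - 128 = 4096
 *                     (the DIF sits in the first 8 metadata bytes, so the guard
 *                      covers the data only)
 */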

static inline uint8_t
_dif_guard_size(enum spdk_dif_pi_format dif_pi_format)
{
	uint8_t size;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.guard);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.guard);
	} else {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.guard);
	}

	return size;
}

static inline void
_dif_set_guard(struct spdk_dif *dif, uint64_t guard, enum spdk_dif_pi_format dif_pi_format)
{
	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		to_be16(&(dif->g16.guard), (uint16_t)guard);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		to_be32(&(dif->g32.guard), (uint32_t)guard);
	} else {
		to_be64(&(dif->g64.guard), guard);
	}
}

static inline uint64_t
_dif_get_guard(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format)
{
	uint64_t guard;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		guard = (uint64_t)from_be16(&(dif->g16.guard));
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		guard = (uint64_t)from_be32(&(dif->g32.guard));
	} else {
		guard = from_be64(&(dif->g64.guard));
	}

	return guard;
}

static inline uint64_t
_dif_generate_guard(uint64_t guard_seed, void *buf, size_t buf_len,
		    enum spdk_dif_pi_format dif_pi_format)
{
	uint64_t guard;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		guard = (uint64_t)spdk_crc16_t10dif((uint16_t)guard_seed, buf, buf_len);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		guard = (uint64_t)spdk_crc32c_nvme(buf, buf_len, guard_seed);
	} else {
		guard = spdk_crc64_nvme(buf, buf_len, guard_seed);
	}

	return guard;
}

static uint64_t
dif_generate_guard_split(uint64_t guard_seed, struct _dif_sgl *sgl, uint32_t start,
			 uint32_t len, const struct spdk_dif_ctx *ctx)
{
	uint64_t guard = guard_seed;
	uint32_t offset, end, buf_len;
	uint8_t *buf;

	offset = start;
	end = start + spdk_min(len, ctx->guard_interval - start);

	while (offset < end) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, end - offset);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(guard, buf, buf_len, ctx->dif_pi_format);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset += buf_len;
	}

	return guard;
}

static inline uint64_t
_dif_generate_guard_copy(uint64_t guard_seed, void *dst, void *src, size_t buf_len,
			 enum spdk_dif_pi_format dif_pi_format)
{
	uint64_t guard;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		guard = (uint64_t)spdk_crc16_t10dif_copy((uint16_t)guard_seed, dst, src, buf_len);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		memcpy(dst, src, buf_len);
		guard = (uint64_t)spdk_crc32c_nvme(src, buf_len, guard_seed);
	} else {
		memcpy(dst, src, buf_len);
		guard = spdk_crc64_nvme(src, buf_len, guard_seed);
	}

	return guard;
}

static uint64_t
_dif_generate_guard_copy_split(uint64_t guard, struct _dif_sgl *dst_sgl,
			       struct _dif_sgl *src_sgl, uint32_t data_len,
			       enum spdk_dif_pi_format dif_pi_format)
{
	uint32_t offset = 0, src_len, dst_len, buf_len;
	uint8_t *src, *dst;

	while (offset < data_len) {
		_dif_sgl_get_buf(src_sgl, &src, &src_len);
		_dif_sgl_get_buf(dst_sgl, &dst, &dst_len);
		buf_len = spdk_min(src_len, dst_len);
		buf_len = spdk_min(buf_len, data_len - offset);

		guard = _dif_generate_guard_copy(guard, dst, src, buf_len, dif_pi_format);

		_dif_sgl_advance(src_sgl, buf_len);
		_dif_sgl_advance(dst_sgl, buf_len);
		offset += buf_len;
	}

	return guard;
}

static void
_data_copy_split(struct _dif_sgl *dst_sgl, struct _dif_sgl *src_sgl, uint32_t data_len)
{
	uint32_t offset = 0, src_len, dst_len, buf_len;
	uint8_t *src, *dst;

	while (offset < data_len) {
		_dif_sgl_get_buf(src_sgl, &src, &src_len);
		_dif_sgl_get_buf(dst_sgl, &dst, &dst_len);
		buf_len = spdk_min(src_len, dst_len);
		buf_len = spdk_min(buf_len, data_len - offset);

		memcpy(dst, src, buf_len);

		_dif_sgl_advance(src_sgl, buf_len);
		_dif_sgl_advance(dst_sgl, buf_len);
		offset += buf_len;
	}
}

static inline uint8_t
_dif_apptag_offset(enum spdk_dif_pi_format dif_pi_format)
{
	return _dif_guard_size(dif_pi_format);
}

static inline uint8_t
_dif_apptag_size(void)
{
	return SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.app_tag);
}

static inline void
_dif_set_apptag(struct spdk_dif *dif, uint16_t app_tag, enum spdk_dif_pi_format dif_pi_format)
{
	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		to_be16(&(dif->g16.app_tag), app_tag);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		to_be16(&(dif->g32.app_tag), app_tag);
	} else {
		to_be16(&(dif->g64.app_tag), app_tag);
	}
}

static inline uint16_t
_dif_get_apptag(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format)
{
	uint16_t app_tag;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		app_tag = from_be16(&(dif->g16.app_tag));
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		app_tag = from_be16(&(dif->g32.app_tag));
	} else {
		app_tag = from_be16(&(dif->g64.app_tag));
	}

	return app_tag;
}

static inline bool
_dif_apptag_ignore(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format)
{
	return _dif_get_apptag(dif, dif_pi_format) == SPDK_DIF_APPTAG_IGNORE;
}

static inline uint8_t
_dif_reftag_offset(enum spdk_dif_pi_format dif_pi_format)
{
	uint8_t offset;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size();
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size()
			 + SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.stor_ref_space_p1);
	} else {
		offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size();
	}

	return offset;
}

static inline uint8_t
_dif_reftag_size(enum spdk_dif_pi_format dif_pi_format)
{
	uint8_t size;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.stor_ref_space);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.stor_ref_space_p2);
	} else {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.stor_ref_space_p1) +
		       SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.stor_ref_space_p2);
	}

	return size;
}

static inline void
_dif_set_reftag(struct spdk_dif *dif, uint64_t ref_tag, enum spdk_dif_pi_format dif_pi_format)
{
	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		to_be32(&(dif->g16.stor_ref_space), (uint32_t)ref_tag);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		to_be64(&(dif->g32.stor_ref_space_p2), ref_tag);
	} else {
		to_be16(&(dif->g64.stor_ref_space_p1), (uint16_t)(ref_tag >> 32));
		to_be32(&(dif->g64.stor_ref_space_p2), (uint32_t)ref_tag);
	}
}

static inline uint64_t
_dif_get_reftag(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format)
{
	uint64_t ref_tag;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		ref_tag = (uint64_t)from_be32(&(dif->g16.stor_ref_space));
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		ref_tag = from_be64(&(dif->g32.stor_ref_space_p2));
	} else {
		ref_tag = (uint64_t)from_be16(&(dif->g64.stor_ref_space_p1));
		ref_tag <<= 32;
		ref_tag |= (uint64_t)from_be32(&(dif->g64.stor_ref_space_p2));
	}

	return ref_tag;
}

static inline bool
_dif_reftag_match(struct spdk_dif *dif, uint64_t ref_tag,
		  enum spdk_dif_pi_format dif_pi_format)
{
	uint64_t _ref_tag;
	bool match;

	_ref_tag = _dif_get_reftag(dif, dif_pi_format);

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		match = (_ref_tag == (ref_tag & REFTAG_MASK_16));
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		match = (_ref_tag == ref_tag);
	} else {
		match = (_ref_tag == (ref_tag & REFTAG_MASK_64));
	}

	return match;
}

static inline bool
_dif_reftag_ignore(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format)
{
	return _dif_reftag_match(dif, REFTAG_MASK_32, dif_pi_format);
}
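
/* Summary of the stored Reference Tag width per PI format (illustrative note,
 * derived from the accessors above): g16 keeps a 4-byte Reference Tag
 * (REFTAG_MASK_16 keeps the low 32 bits), g32 keeps a full 8-byte Reference Tag,
 * and g64 keeps a 6-byte Reference Tag split across stor_ref_space_p1/_p2
 * (REFTAG_MASK_64 keeps the low 48 bits).  For example, with
 * SPDK_DIF_PI_FORMAT_16 a passed Reference Tag of 0x1122334455667788 is stored
 * and compared as 0x55667788.
 */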

static bool
_dif_ignore(struct spdk_dif *dif, const struct spdk_dif_ctx *ctx)
{
	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* If Type 1 or 2 is used, then all DIF checks are disabled when
		 * the Application Tag is 0xFFFF.
		 */
		if (_dif_apptag_ignore(dif, ctx->dif_pi_format)) {
			return true;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* If Type 3 is used, then all DIF checks are disabled when the
		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF
		 * or 0xFFFFFFFFFFFFFFFF depending on the PI format.
		 */
		if (_dif_apptag_ignore(dif, ctx->dif_pi_format) &&
		    _dif_reftag_ignore(dif, ctx->dif_pi_format)) {
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

static bool
_dif_pi_format_is_valid(enum spdk_dif_pi_format dif_pi_format)
{
	switch (dif_pi_format) {
	case SPDK_DIF_PI_FORMAT_16:
	case SPDK_DIF_PI_FORMAT_32:
	case SPDK_DIF_PI_FORMAT_64:
		return true;
	default:
		return false;
	}
}

static bool
_dif_type_is_valid(enum spdk_dif_type dif_type)
{
	switch (dif_type) {
	case SPDK_DIF_DISABLE:
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
	case SPDK_DIF_TYPE3:
		return true;
	default:
		return false;
	}
}

int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint64_t guard_seed, struct spdk_dif_ctx_init_ext_opts *opts)
{
	uint32_t data_block_size;
	enum spdk_dif_pi_format dif_pi_format = SPDK_DIF_PI_FORMAT_16;

	if (opts != NULL) {
		if (!_dif_pi_format_is_valid(opts->dif_pi_format)) {
			SPDK_ERRLOG("No valid DIF PI format provided.\n");
			return -EINVAL;
		}

		dif_pi_format = opts->dif_pi_format;
	}

	if (!_dif_type_is_valid(dif_type)) {
		SPDK_ERRLOG("No valid DIF type was provided.\n");
		return -EINVAL;
	}

	if (md_size < _dif_size(dif_pi_format)) {
		SPDK_ERRLOG("Metadata size is smaller than DIF size.\n");
		return -EINVAL;
	}

	if (md_interleave) {
		if (block_size < md_size) {
			SPDK_ERRLOG("Block size is smaller than metadata size.\n");
			return -EINVAL;
		}
		data_block_size = block_size - md_size;
	} else {
		data_block_size = block_size;
	}

	if (data_block_size == 0) {
		SPDK_ERRLOG("Zero data block size is not allowed\n");
		return -EINVAL;
	}

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		if ((data_block_size % 512) != 0) {
			SPDK_ERRLOG("Data block size should be a multiple of 512B\n");
			return -EINVAL;
		}
	} else {
		if ((data_block_size % 4096) != 0) {
			SPDK_ERRLOG("Data block size should be a multiple of 4kB\n");
			return -EINVAL;
		}
	}

	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->md_interleave = md_interleave;
	ctx->dif_pi_format = dif_pi_format;
	ctx->guard_interval = _get_guard_interval(block_size, md_size, dif_loc, md_interleave,
			      _dif_size(ctx->dif_pi_format));
	ctx->dif_type = dif_type;
	ctx->dif_flags = dif_flags;
	ctx->init_ref_tag = init_ref_tag;
	ctx->apptag_mask = apptag_mask;
	ctx->app_tag = app_tag;
	ctx->data_offset = data_offset;
	ctx->ref_tag_offset = data_offset / data_block_size;
	ctx->last_guard = guard_seed;
	ctx->guard_seed = guard_seed;
	ctx->remapped_init_ref_tag = 0;

	return 0;
}
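
/* Illustrative usage sketch (not part of the library): initializing a context for
 * 512-byte data blocks with 8 bytes of interleaved metadata, DIF Type 1, the
 * 16-bit guard PI format and all checks enabled.  The tag values are arbitrary,
 * and any additional fields of spdk_dif_ctx_init_ext_opts (e.g. a size/version
 * member) are assumed to be filled in as the public header requires.
 *
 *	struct spdk_dif_ctx ctx;
 *	struct spdk_dif_ctx_init_ext_opts opts = {};
 *	int rc;
 *
 *	opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
 *	rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, true, SPDK_DIF_TYPE1,
 *			       SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
 *			       SPDK_DIF_FLAGS_REFTAG_CHECK,
 *			       0x100, 0xFFFF, 0x88, 0, 0, &opts);
 */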

void
spdk_dif_ctx_set_data_offset(struct spdk_dif_ctx *ctx, uint32_t data_offset)
{
	uint32_t data_block_size;

	if (ctx->md_interleave) {
		data_block_size = ctx->block_size - ctx->md_size;
	} else {
		data_block_size = ctx->block_size;
	}

	ctx->data_offset = data_offset;
	ctx->ref_tag_offset = data_offset / data_block_size;
}

void
spdk_dif_ctx_set_remapped_init_ref_tag(struct spdk_dif_ctx *ctx,
				       uint32_t remapped_init_ref_tag)
{
	ctx->remapped_init_ref_tag = remapped_init_ref_tag;
}

static void
_dif_generate(void *_dif, uint64_t guard, uint32_t offset_blocks,
	      const struct spdk_dif_ctx *ctx)
{
	struct spdk_dif *dif = _dif;
	uint64_t ref_tag;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		_dif_set_guard(dif, guard, ctx->dif_pi_format);
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
		_dif_set_apptag(dif, ctx->app_tag, ctx->dif_pi_format);
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
		/* For type 1 and 2, the reference tag is incremented for each
		 * subsequent logical block. For type 3, the reference tag
		 * remains the same as the initial reference tag.
		 */
		if (ctx->dif_type != SPDK_DIF_TYPE3) {
			ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		} else {
			ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset;
		}

		/* Overwrite reference tag if initialization reference tag is SPDK_DIF_REFTAG_IGNORE */
		if (ctx->init_ref_tag == SPDK_DIF_REFTAG_IGNORE) {
			if (ctx->dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
				ref_tag = REFTAG_MASK_16;
			} else if (ctx->dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
				ref_tag = REFTAG_MASK_32;
			} else {
				ref_tag = REFTAG_MASK_64;
			}
		}

		_dif_set_reftag(dif, ref_tag, ctx->dif_pi_format);
	}
}

static void
dif_generate(struct _dif_sgl *sgl, uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;
	uint8_t *buf;
	uint64_t guard = 0;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_sgl_get_buf(sgl, &buf, NULL);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(ctx->guard_seed, buf, ctx->guard_interval, ctx->dif_pi_format);
		}

		_dif_generate(buf + ctx->guard_interval, guard, offset_blocks, ctx);

		_dif_sgl_advance(sgl, ctx->block_size);
	}
}

static void
dif_store_split(struct _dif_sgl *sgl, struct spdk_dif *dif,
		const struct spdk_dif_ctx *ctx)
{
	uint32_t offset = 0, rest_md_len, buf_len;
	uint8_t *buf;

	rest_md_len = ctx->block_size - ctx->guard_interval;

	while (offset < rest_md_len) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);

		if (offset < _dif_size(ctx->dif_pi_format)) {
			buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset);
			memcpy(buf, (uint8_t *)dif + offset, buf_len);
		} else {
			buf_len = spdk_min(buf_len, rest_md_len - offset);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset += buf_len;
	}
}

static uint64_t
_dif_generate_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
		    uint64_t guard, uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	struct spdk_dif dif = {};

	assert(offset_in_block < ctx->guard_interval);
	assert(offset_in_block + data_len < ctx->guard_interval ||
	       offset_in_block + data_len == ctx->block_size);

	/* Compute CRC over split logical block data. */
	guard = dif_generate_guard_split(guard, sgl, offset_in_block, data_len, ctx);

	if (offset_in_block + data_len < ctx->guard_interval) {
		return guard;
	}

	/* If the whole logical block data has been parsed, generate DIF
	 * and save it to the temporary DIF area.
	 */
	_dif_generate(&dif, guard, offset_blocks, ctx);

	/* Copy the generated DIF field to the split DIF field, and then
	 * skip the metadata field after the DIF field (if any).
	 */
	dif_store_split(sgl, &dif, ctx);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	return guard;
}

static void
dif_generate_split(struct _dif_sgl *sgl, uint32_t num_blocks,
		   const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;
	uint64_t guard = 0;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_generate_split(sgl, 0, ctx->block_size, guard, offset_blocks, ctx);
	}
}

int
spdk_dif_generate(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		  const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl sgl;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
		dif_generate(&sgl, num_blocks, ctx);
	} else {
		dif_generate_split(&sgl, num_blocks, ctx);
	}

	return 0;
}
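
/* Illustrative usage sketch (assumptions: ctx was initialized as in the earlier
 * example for 520-byte blocks with interleaved metadata, and buf holds
 * num_blocks such blocks): spdk_dif_generate() computes and writes the DIF field
 * of every block in place.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 520 * num_blocks };
 *
 *	rc = spdk_dif_generate(&iov, 1, num_blocks, &ctx);
 */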

static void
_dif_error_set(struct spdk_dif_error *err_blk, uint8_t err_type,
	       uint64_t expected, uint64_t actual, uint32_t err_offset)
{
	if (err_blk) {
		err_blk->err_type = err_type;
		err_blk->expected = expected;
		err_blk->actual = actual;
		err_blk->err_offset = err_offset;
	}
}

static bool
_dif_reftag_check(struct spdk_dif *dif, const struct spdk_dif_ctx *ctx,
		  uint64_t expected_reftag, uint32_t offset_blocks, struct spdk_dif_error *err_blk)
{
	uint64_t reftag;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
		switch (ctx->dif_type) {
		case SPDK_DIF_TYPE1:
		case SPDK_DIF_TYPE2:
			/* Compare the DIF Reference Tag field to the passed Reference Tag.
			 * The passed Reference Tag will be the least significant 4 bytes
			 * or 8 bytes (depending on the PI format)
			 * of the LBA when Type 1 is used, and application specific value
			 * if Type 2 is used.
			 */
			if (!_dif_reftag_match(dif, expected_reftag, ctx->dif_pi_format)) {
				reftag = _dif_get_reftag(dif, ctx->dif_pi_format);
				_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected_reftag,
					       reftag, offset_blocks);
				SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu64 "," \
					    " Expected=%lx, Actual=%lx\n",
					    expected_reftag, expected_reftag, reftag);
				return false;
			}
			break;
		case SPDK_DIF_TYPE3:
			/* For Type 3, computed Reference Tag remains unchanged.
			 * Hence ignore the Reference Tag field.
			 */
			break;
		default:
			break;
		}
	}

	return true;
}

static int
_dif_verify(void *_dif, uint64_t guard, uint32_t offset_blocks,
	    const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct spdk_dif *dif = _dif;
	uint64_t _guard;
	uint16_t _app_tag;
	uint64_t ref_tag;

	if (_dif_ignore(dif, ctx)) {
		return 0;
	}

	/* For type 1 and 2, the reference tag is incremented for each
	 * subsequent logical block. For type 3, the reference tag
	 * remains the same as the initial reference tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		/* Compare the DIF Guard field to the CRC computed over the logical
		 * block data.
		 */
		_guard = _dif_get_guard(dif, ctx->dif_pi_format);
		if (_guard != guard) {
			_dif_error_set(err_blk, SPDK_DIF_GUARD_ERROR, _guard, guard,
				       offset_blocks);
			SPDK_ERRLOG("Failed to compare Guard: LBA=%" PRIu64 "," \
				    " Expected=%lx, Actual=%lx\n",
				    ref_tag, _guard, guard);
			return -1;
		}
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
		/* Compare unmasked bits in the DIF Application Tag field to the
		 * passed Application Tag.
		 */
		_app_tag = _dif_get_apptag(dif, ctx->dif_pi_format);
		if ((_app_tag & ctx->apptag_mask) != (ctx->app_tag & ctx->apptag_mask)) {
			_dif_error_set(err_blk, SPDK_DIF_APPTAG_ERROR, ctx->app_tag,
				       (_app_tag & ctx->apptag_mask), offset_blocks);
			SPDK_ERRLOG("Failed to compare App Tag: LBA=%" PRIu64 "," \
				    " Expected=%x, Actual=%x\n",
				    ref_tag, ctx->app_tag, (_app_tag & ctx->apptag_mask));
			return -1;
		}
	}

	if (!_dif_reftag_check(dif, ctx, ref_tag, offset_blocks, err_blk)) {
		return -1;
	}

	return 0;
}

static int
dif_verify(struct _dif_sgl *sgl, uint32_t num_blocks,
	   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	int rc;
	uint8_t *buf;
	uint64_t guard = 0;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_sgl_get_buf(sgl, &buf, NULL);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(ctx->guard_seed, buf, ctx->guard_interval, ctx->dif_pi_format);
		}

		rc = _dif_verify(buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_dif_sgl_advance(sgl, ctx->block_size);
	}

	return 0;
}

static void
dif_load_split(struct _dif_sgl *sgl, struct spdk_dif *dif,
	       const struct spdk_dif_ctx *ctx)
{
	uint32_t offset = 0, rest_md_len, buf_len;
	uint8_t *buf;

	rest_md_len = ctx->block_size - ctx->guard_interval;

	while (offset < rest_md_len) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);

		if (offset < _dif_size(ctx->dif_pi_format)) {
			buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset);
			memcpy((uint8_t *)dif + offset, buf, buf_len);
		} else {
			buf_len = spdk_min(buf_len, rest_md_len - offset);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset += buf_len;
	}
}

static int
_dif_verify_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
		  uint64_t *_guard, uint32_t offset_blocks,
		  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint64_t guard = *_guard;
	struct spdk_dif dif = {};
	int rc;

	assert(_guard != NULL);
	assert(offset_in_block < ctx->guard_interval);
	assert(offset_in_block + data_len < ctx->guard_interval ||
	       offset_in_block + data_len == ctx->block_size);

	guard = dif_generate_guard_split(guard, sgl, offset_in_block, data_len, ctx);

	if (offset_in_block + data_len < ctx->guard_interval) {
		*_guard = guard;
		return 0;
	}

	dif_load_split(sgl, &dif, ctx);

	rc = _dif_verify(&dif, guard, offset_blocks, ctx, err_blk);
	if (rc != 0) {
		return rc;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	*_guard = guard;
	return 0;
}

static int
dif_verify_split(struct _dif_sgl *sgl, uint32_t num_blocks,
		 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	uint64_t guard = 0;
	int rc;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_verify_split(sgl, 0, ctx->block_size, &guard, offset_blocks,
				       ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

int
spdk_dif_verify(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct _dif_sgl sgl;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
		return dif_verify(&sgl, num_blocks, ctx, err_blk);
	} else {
		return dif_verify_split(&sgl, num_blocks, ctx, err_blk);
	}
}
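
/* Illustrative usage sketch: verifying the same buffer and reporting the first
 * mismatch.  struct spdk_dif_error (from spdk/dif.h) receives the error type,
 * the expected and actual values, and the offending block offset.
 *
 *	struct spdk_dif_error err_blk;
 *
 *	rc = spdk_dif_verify(&iov, 1, num_blocks, &ctx, &err_blk);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("DIF error type %u at block %u\n",
 *			    err_blk.err_type, err_blk.err_offset);
 *	}
 */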

static uint32_t
dif_update_crc32c(struct _dif_sgl *sgl, uint32_t num_blocks,
		  uint32_t crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;
	uint8_t *buf;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_sgl_get_buf(sgl, &buf, NULL);

		crc32c = spdk_crc32c_update(buf, ctx->block_size - ctx->md_size, crc32c);

		_dif_sgl_advance(sgl, ctx->block_size);
	}

	return crc32c;
}

static uint32_t
_dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
			 uint32_t crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, buf_len;
	uint8_t *buf;

	data_block_size = ctx->block_size - ctx->md_size;

	assert(offset_in_block + data_len <= ctx->block_size);

	while (data_len != 0) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);

		if (offset_in_block < data_block_size) {
			buf_len = spdk_min(buf_len, data_block_size - offset_in_block);
			crc32c = spdk_crc32c_update(buf, buf_len, crc32c);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
		data_len -= buf_len;
	}

	return crc32c;
}

static uint32_t
dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t num_blocks,
			uint32_t crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		crc32c = _dif_update_crc32c_split(sgl, 0, ctx->block_size, crc32c, ctx);
	}

	return crc32c;
}

int
spdk_dif_update_crc32c(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		       uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl sgl;

	if (_crc32c == NULL) {
		return -EINVAL;
	}

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
		*_crc32c = dif_update_crc32c(&sgl, num_blocks, *_crc32c, ctx);
	} else {
		*_crc32c = dif_update_crc32c_split(&sgl, num_blocks, *_crc32c, ctx);
	}

	return 0;
}

static void
_dif_generate_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		   uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size;
	uint8_t *src, *dst;
	uint64_t guard = 0;

	data_block_size = ctx->block_size - ctx->md_size;

	_dif_sgl_get_buf(src_sgl, &src, NULL);
	_dif_sgl_get_buf(dst_sgl, &dst, NULL);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = _dif_generate_guard_copy(ctx->guard_seed, dst, src, data_block_size,
						 ctx->dif_pi_format);
		guard = _dif_generate_guard(guard, dst + data_block_size,
					    ctx->guard_interval - data_block_size, ctx->dif_pi_format);
	} else {
		memcpy(dst, src, data_block_size);
	}

	_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);

	_dif_sgl_advance(src_sgl, data_block_size);
	_dif_sgl_advance(dst_sgl, ctx->block_size);
}

static void
dif_generate_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_generate_copy(src_sgl, dst_sgl, offset_blocks, ctx);
	}
}

static void
_dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
			 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size;
	uint64_t guard = 0;
	struct spdk_dif dif = {};

	data_block_size = ctx->block_size - ctx->md_size;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = _dif_generate_guard_copy_split(ctx->guard_seed, dst_sgl, src_sgl,
						       data_block_size, ctx->dif_pi_format);
		guard = dif_generate_guard_split(guard, dst_sgl, data_block_size,
						 ctx->guard_interval - data_block_size, ctx);
	} else {
		_data_copy_split(dst_sgl, src_sgl, data_block_size);
		_dif_sgl_advance(dst_sgl, ctx->guard_interval - data_block_size);
	}

	_dif_generate(&dif, guard, offset_blocks, ctx);

	dif_store_split(dst_sgl, &dif, ctx);
}

static void
dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
			uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_generate_copy_split(src_sgl, dst_sgl, offset_blocks, ctx);
	}
}

static void
_dif_disable_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
			 const struct spdk_dif_ctx *ctx)
{
	uint32_t offset = 0, src_len, dst_len, buf_len, data_block_size;
	uint8_t *src, *dst;

	data_block_size = ctx->block_size - ctx->md_size;

	while (offset < data_block_size) {
		_dif_sgl_get_buf(src_sgl, &src, &src_len);
		_dif_sgl_get_buf(dst_sgl, &dst, &dst_len);
		buf_len = spdk_min(src_len, dst_len);
		buf_len = spdk_min(buf_len, data_block_size - offset);

		memcpy(dst, src, buf_len);

		_dif_sgl_advance(src_sgl, buf_len);
		_dif_sgl_advance(dst_sgl, buf_len);
		offset += buf_len;
	}

	_dif_sgl_advance(dst_sgl, ctx->md_size);
}

static void
dif_disable_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
			uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_disable_insert_copy(src_sgl, dst_sgl, ctx);
	}
}

int
spdk_dif_generate_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs,
		       int bounce_iovcnt, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl src_sgl, dst_sgl;
	uint32_t data_block_size;

	_dif_sgl_init(&src_sgl, iovs, iovcnt);
	_dif_sgl_init(&dst_sgl, bounce_iovs, bounce_iovcnt);

	data_block_size = ctx->block_size - ctx->md_size;

	if (!_dif_sgl_is_valid(&src_sgl, data_block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&dst_sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec arrays are not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		dif_disable_insert_copy(&src_sgl, &dst_sgl, num_blocks, ctx);
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&src_sgl, data_block_size) &&
	    _dif_sgl_is_bytes_multiple(&dst_sgl, ctx->block_size)) {
		dif_generate_copy(&src_sgl, &dst_sgl, num_blocks, ctx);
	} else {
		dif_generate_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx);
	}

	return 0;
}
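
/* Illustrative usage sketch (hypothetical write path): the application supplies
 * plain data iovecs and a bounce buffer sized for data plus metadata;
 * spdk_dif_generate_copy() copies the data and appends freshly generated DIF per
 * block.  The mirror image for the read path is spdk_dif_verify_copy() below,
 * which checks the DIF in the bounce buffer while copying the data out.
 *
 *	struct iovec data_iov   = { .iov_base = data,   .iov_len = 512 * num_blocks };
 *	struct iovec bounce_iov = { .iov_base = bounce, .iov_len = 520 * num_blocks };
 *
 *	rc = spdk_dif_generate_copy(&data_iov, 1, &bounce_iov, 1, num_blocks, &ctx);
 */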

static int
_dif_verify_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
		 struct spdk_dif_error *err_blk)
{
	uint32_t data_block_size;
	uint8_t *src, *dst;
	int rc;
	uint64_t guard = 0;

	data_block_size = ctx->block_size - ctx->md_size;

	_dif_sgl_get_buf(src_sgl, &src, NULL);
	_dif_sgl_get_buf(dst_sgl, &dst, NULL);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = _dif_generate_guard_copy(ctx->guard_seed, dst, src, data_block_size,
						 ctx->dif_pi_format);
		guard = _dif_generate_guard(guard, src + data_block_size,
					    ctx->guard_interval - data_block_size, ctx->dif_pi_format);
	} else {
		memcpy(dst, src, data_block_size);
	}

	rc = _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
	if (rc != 0) {
		return rc;
	}

	_dif_sgl_advance(src_sgl, ctx->block_size);
	_dif_sgl_advance(dst_sgl, data_block_size);

	return 0;
}

static int
dif_verify_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	int rc;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_verify_copy(src_sgl, dst_sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
_dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		       uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk)
{
	uint32_t data_block_size;
	uint64_t guard = 0;
	struct spdk_dif dif = {};

	data_block_size = ctx->block_size - ctx->md_size;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = _dif_generate_guard_copy_split(ctx->guard_seed, dst_sgl, src_sgl,
						       data_block_size, ctx->dif_pi_format);
		guard = dif_generate_guard_split(guard, src_sgl, data_block_size,
						 ctx->guard_interval - data_block_size, ctx);
	} else {
		_data_copy_split(dst_sgl, src_sgl, data_block_size);
		_dif_sgl_advance(src_sgl, ctx->guard_interval - data_block_size);
	}

	dif_load_split(src_sgl, &dif, ctx);

	return _dif_verify(&dif, guard, offset_blocks, ctx, err_blk);
}

static int
dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		      struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	int rc;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_verify_copy_split(src_sgl, dst_sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static void
_dif_disable_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
			const struct spdk_dif_ctx *ctx)
{
	uint32_t offset = 0, src_len, dst_len, buf_len, data_block_size;
	uint8_t *src, *dst;

	data_block_size = ctx->block_size - ctx->md_size;

	while (offset < data_block_size) {
		_dif_sgl_get_buf(src_sgl, &src, &src_len);
		_dif_sgl_get_buf(dst_sgl, &dst, &dst_len);
		buf_len = spdk_min(src_len, dst_len);
		buf_len = spdk_min(buf_len, data_block_size - offset);

		memcpy(dst, src, buf_len);

		_dif_sgl_advance(src_sgl, buf_len);
		_dif_sgl_advance(dst_sgl, buf_len);
		offset += buf_len;
	}

	_dif_sgl_advance(src_sgl, ctx->md_size);
}

static void
dif_disable_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		       uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_disable_strip_copy(src_sgl, dst_sgl, ctx);
	}
}

int
spdk_dif_verify_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs,
		     int bounce_iovcnt, uint32_t num_blocks,
		     const struct spdk_dif_ctx *ctx,
		     struct spdk_dif_error *err_blk)
{
	struct _dif_sgl src_sgl, dst_sgl;
	uint32_t data_block_size;

	_dif_sgl_init(&src_sgl, bounce_iovs, bounce_iovcnt);
	_dif_sgl_init(&dst_sgl, iovs, iovcnt);

	data_block_size = ctx->block_size - ctx->md_size;

	if (!_dif_sgl_is_valid(&dst_sgl, data_block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&src_sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec arrays are not valid\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		dif_disable_strip_copy(&src_sgl, &dst_sgl, num_blocks, ctx);
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&dst_sgl, data_block_size) &&
	    _dif_sgl_is_bytes_multiple(&src_sgl, ctx->block_size)) {
		return dif_verify_copy(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk);
	} else {
		return dif_verify_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk);
	}
}

static void
_bit_flip(uint8_t *buf, uint32_t flip_bit)
{
	uint8_t byte;

	byte = *buf;
	byte ^= 1 << flip_bit;
	*buf = byte;
}

static int
_dif_inject_error(struct _dif_sgl *sgl,
		  uint32_t block_size, uint32_t num_blocks,
		  uint32_t inject_offset_blocks,
		  uint32_t inject_offset_bytes,
		  uint32_t inject_offset_bits)
{
	uint32_t offset_in_block, buf_len;
	uint8_t *buf;

	_dif_sgl_advance(sgl, block_size * inject_offset_blocks);

	offset_in_block = 0;

	while (offset_in_block < block_size) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, block_size - offset_in_block);

		if (inject_offset_bytes >= offset_in_block &&
		    inject_offset_bytes < offset_in_block + buf_len) {
			buf += inject_offset_bytes - offset_in_block;
			_bit_flip(buf, inject_offset_bits);
			return 0;
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
	}

	return -1;
}

static int
dif_inject_error(struct _dif_sgl *sgl, uint32_t block_size, uint32_t num_blocks,
		 uint32_t start_inject_bytes, uint32_t inject_range_bytes,
		 uint32_t *inject_offset)
{
	uint32_t inject_offset_blocks, inject_offset_bytes, inject_offset_bits;
	uint32_t offset_blocks;
	int rc;

	srand(time(0));

	inject_offset_blocks = rand() % num_blocks;
	inject_offset_bytes = start_inject_bytes + (rand() % inject_range_bytes);
	inject_offset_bits = rand() % 8;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		if (offset_blocks == inject_offset_blocks) {
			rc = _dif_inject_error(sgl, block_size, num_blocks,
					       inject_offset_blocks,
					       inject_offset_bytes,
					       inject_offset_bits);
			if (rc == 0) {
				*inject_offset = inject_offset_blocks;
			}
			return rc;
		}
	}

	return -1;
}

int
spdk_dif_inject_error(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		      const struct spdk_dif_ctx *ctx, uint32_t inject_flags,
		      uint32_t *inject_offset)
{
	struct _dif_sgl sgl;
	int rc;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      ctx->guard_interval + _dif_reftag_offset(ctx->dif_pi_format),
				      _dif_reftag_size(ctx->dif_pi_format),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      ctx->guard_interval + _dif_apptag_offset(ctx->dif_pi_format),
				      _dif_apptag_size(),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      ctx->guard_interval,
				      _dif_guard_size(ctx->dif_pi_format),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Guard.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_DATA_ERROR) {
		/* If the DIF information is contained within the last 8/16 bytes of
		 * metadata (depending on the PI format), then the CRC covers all metadata
		 * bytes up to but excluding the last 8/16 bytes. But error injection does not
		 * cover these metadata because classification is not determined yet.
		 *
		 * Note: Error injection to data block is expected to be detected as
		 * guard error.
		 */
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      0,
				      ctx->block_size - ctx->md_size,
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to data block.\n");
			return rc;
		}
	}

	return 0;
}
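
/* Illustrative test sketch (an assumption about how this helper is meant to be
 * exercised, not a statement about any particular test suite): flip one random
 * bit inside the Guard field of a random block, then confirm that verification
 * reports an error on that block.
 *
 *	uint32_t bad_block;
 *
 *	rc = spdk_dif_inject_error(&iov, 1, num_blocks, &ctx,
 *				   SPDK_DIF_GUARD_ERROR, &bad_block);
 *	assert(rc == 0);
 *	rc = spdk_dif_verify(&iov, 1, num_blocks, &ctx, &err_blk);
 *	assert(rc != 0 && err_blk.err_offset == bad_block);
 */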

static void
dix_generate(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
	     uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks = 0;
	uint8_t *data_buf, *md_buf;
	uint64_t guard;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(data_sgl, &data_buf, NULL);
		_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(ctx->guard_seed, data_buf, ctx->block_size,
						    ctx->dif_pi_format);
			guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval,
						    ctx->dif_pi_format);
		}

		_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);

		_dif_sgl_advance(data_sgl, ctx->block_size);
		_dif_sgl_advance(md_sgl, ctx->md_size);
		offset_blocks++;
	}
}

static void
_dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		    uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_in_block, data_buf_len;
	uint8_t *data_buf, *md_buf;
	uint64_t guard = 0;

	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < ctx->block_size) {
		_dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len);
		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(guard, data_buf, data_buf_len,
						    ctx->dif_pi_format);
		}

		_dif_sgl_advance(data_sgl, data_buf_len);
		offset_in_block += data_buf_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval,
					    ctx->dif_pi_format);
	}

	_dif_sgl_advance(md_sgl, ctx->md_size);

	_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);
}

static void
dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		   uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dix_generate_split(data_sgl, md_sgl, offset_blocks, ctx);
	}
}

int
spdk_dix_generate(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl data_sgl, md_sgl;

	_dif_sgl_init(&data_sgl, iovs, iovcnt);
	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) {
		dix_generate(&data_sgl, &md_sgl, num_blocks, ctx);
	} else {
		dix_generate_split(&data_sgl, &md_sgl, num_blocks, ctx);
	}

	return 0;
}

static int
dix_verify(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
	   uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
	   struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks = 0;
	uint8_t *data_buf, *md_buf;
	uint64_t guard;
	int rc;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(data_sgl, &data_buf, NULL);
		_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(ctx->guard_seed, data_buf, ctx->block_size,
						    ctx->dif_pi_format);
			guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval,
						    ctx->dif_pi_format);
		}

		rc = _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_dif_sgl_advance(data_sgl, ctx->block_size);
		_dif_sgl_advance(md_sgl, ctx->md_size);
		offset_blocks++;
	}

	return 0;
}

static int
_dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		  uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
		  struct spdk_dif_error *err_blk)
{
	uint32_t offset_in_block, data_buf_len;
	uint8_t *data_buf, *md_buf;
	uint64_t guard = 0;

	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < ctx->block_size) {
		_dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len);
		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(guard, data_buf, data_buf_len,
						    ctx->dif_pi_format);
		}

		_dif_sgl_advance(data_sgl, data_buf_len);
		offset_in_block += data_buf_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval,
					    ctx->dif_pi_format);
	}

	_dif_sgl_advance(md_sgl, ctx->md_size);

	return _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
}

static int
dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		 uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		 struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	int rc;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dix_verify_split(data_sgl, md_sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

int
spdk_dix_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		struct spdk_dif_error *err_blk)
{
	struct _dif_sgl data_sgl, md_sgl;

	if (md_iov->iov_base == NULL) {
		SPDK_ERRLOG("Metadata buffer is NULL.\n");
		return -EINVAL;
	}

	_dif_sgl_init(&data_sgl, iovs, iovcnt);
	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) {
		return dix_verify(&data_sgl, &md_sgl, num_blocks, ctx, err_blk);
	} else {
		return dix_verify_split(&data_sgl, &md_sgl, num_blocks, ctx, err_blk);
	}
}
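
/* Illustrative usage sketch for the DIX (separate metadata) variants: the data
 * iovecs carry only block data and a single md_iov carries all per-block
 * metadata, so ctx should have been initialized with md_interleave == false.
 * Buffer names and sizes below are hypothetical.
 *
 *	struct iovec data_iov = { .iov_base = data, .iov_len = 4096 * num_blocks };
 *	struct iovec md_iov   = { .iov_base = md,   .iov_len = 8 * num_blocks };
 *
 *	rc = spdk_dix_generate(&data_iov, 1, &md_iov, num_blocks, &ctx);
 *	...
 *	rc = spdk_dix_verify(&data_iov, 1, &md_iov, num_blocks, &ctx, &err_blk);
 */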

int
spdk_dix_inject_error(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		      uint32_t inject_flags, uint32_t *inject_offset)
{
	struct _dif_sgl data_sgl, md_sgl;
	int rc;

	_dif_sgl_init(&data_sgl, iovs, iovcnt);
	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
				      ctx->guard_interval + _dif_reftag_offset(ctx->dif_pi_format),
				      _dif_reftag_size(ctx->dif_pi_format),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
				      ctx->guard_interval + _dif_apptag_offset(ctx->dif_pi_format),
				      _dif_apptag_size(),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
				      ctx->guard_interval,
				      _dif_guard_size(ctx->dif_pi_format),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Guard.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_DATA_ERROR) {
		/* Note: Error injection to data block is expected to be detected
		 * as guard error.
		 */
		rc = dif_inject_error(&data_sgl, ctx->block_size, num_blocks,
				      0,
				      ctx->block_size,
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to data block.\n");
			return rc;
		}
	}

	return 0;
}

static uint32_t
_to_next_boundary(uint32_t offset, uint32_t boundary)
{
	return boundary - (offset % boundary);
}

static uint32_t
_to_size_with_md(uint32_t size, uint32_t data_block_size, uint32_t block_size)
{
	return (size / data_block_size) * block_size + (size % data_block_size);
}

int
spdk_dif_set_md_interleave_iovs(struct iovec *iovs, int iovcnt,
				struct iovec *buf_iovs, int buf_iovcnt,
				uint32_t data_offset, uint32_t data_len,
				uint32_t *_mapped_len,
				const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_len, buf_offset, len;
	struct _dif_sgl dif_sgl;
	struct _dif_sgl buf_sgl;

	if (iovs == NULL || iovcnt == 0 || buf_iovs == NULL || buf_iovcnt == 0) {
		return -EINVAL;
	}

	data_block_size = ctx->block_size - ctx->md_size;

	data_unalign = ctx->data_offset % data_block_size;

	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
				   ctx->block_size);
	buf_len -= data_unalign;

	_dif_sgl_init(&dif_sgl, iovs, iovcnt);
	_dif_sgl_init(&buf_sgl, buf_iovs, buf_iovcnt);

	if (!_dif_sgl_is_valid(&buf_sgl, buf_len)) {
		SPDK_ERRLOG("Buffer overflow will occur.\n");
		return -ERANGE;
	}

	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
	buf_offset -= data_unalign;

	_dif_sgl_advance(&buf_sgl, buf_offset);

	while (data_len != 0) {
		len = spdk_min(data_len, _to_next_boundary(ctx->data_offset + data_offset, data_block_size));
		if (!_dif_sgl_append_split(&dif_sgl, &buf_sgl, len)) {
			break;
		}
		_dif_sgl_advance(&buf_sgl, ctx->md_size);
		data_offset += len;
		data_len -= len;
	}

	if (_mapped_len != NULL) {
		*_mapped_len = dif_sgl.total_size;
	}

	return iovcnt - dif_sgl.iovcnt;
}
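
/* Worked example for _to_size_with_md() (values chosen for illustration):
 * with data_block_size = 512 and block_size = 520, a data length of 1000 bytes
 * maps to (1000 / 512) * 520 + (1000 % 512) = 520 + 488 = 1008 bytes, i.e. one
 * complete block expands by the 8 metadata bytes while the trailing partial
 * block does not.  spdk_dif_set_md_interleave_iovs() uses the same conversion to
 * size and offset the metadata-aware iovec array it builds.
 */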
static int
_dif_sgl_setup_stream(struct _dif_sgl *sgl, uint32_t *_buf_offset, uint32_t *_buf_len,
		      uint32_t data_offset, uint32_t data_len,
		      const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_len, buf_offset;

	data_block_size = ctx->block_size - ctx->md_size;

	data_unalign = ctx->data_offset % data_block_size;

	/* If the last data block is complete, DIF of the data block is
	 * inserted or verified in this turn.
	 */
	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
				   ctx->block_size);
	buf_len -= data_unalign;

	if (!_dif_sgl_is_valid(sgl, buf_len)) {
		return -ERANGE;
	}

	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
	buf_offset -= data_unalign;

	_dif_sgl_advance(sgl, buf_offset);
	buf_len -= buf_offset;

	buf_offset += data_unalign;

	*_buf_offset = buf_offset;
	*_buf_len = buf_len;

	return 0;
}

int
spdk_dif_generate_stream(struct iovec *iovs, int iovcnt,
			 uint32_t data_offset, uint32_t data_len,
			 struct spdk_dif_ctx *ctx)
{
	uint32_t buf_len = 0, buf_offset = 0;
	uint32_t len, offset_in_block, offset_blocks;
	uint64_t guard = 0;
	struct _dif_sgl sgl;
	int rc;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->last_guard;
	}

	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;
		offset_blocks = buf_offset / ctx->block_size;

		guard = _dif_generate_split(&sgl, offset_in_block, len, guard, offset_blocks, ctx);

		buf_len -= len;
		buf_offset += len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		ctx->last_guard = guard;
	}

	return 0;
}

int
spdk_dif_verify_stream(struct iovec *iovs, int iovcnt,
		       uint32_t data_offset, uint32_t data_len,
		       struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk)
{
	uint32_t buf_len = 0, buf_offset = 0;
	uint32_t len, offset_in_block, offset_blocks;
	uint64_t guard = 0;
	struct _dif_sgl sgl;
	int rc = 0;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->last_guard;
	}

	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;
		offset_blocks = buf_offset / ctx->block_size;

		rc = _dif_verify_split(&sgl, offset_in_block, len, &guard, offset_blocks,
				       ctx, err_blk);
		if (rc != 0) {
			goto error;
		}

		buf_len -= len;
		buf_offset += len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		ctx->last_guard = guard;
	}
error:
	return rc;
}
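/* Usage sketch (illustrative only, not compiled): generate DIF incrementally as
 * data arrives in arbitrary chunks. Assumes dif_ctx was set up with
 * spdk_dif_ctx_init() for 520-byte blocks with 8 bytes of interleaved metadata and
 * SPDK_DIF_FLAGS_GUARD_CHECK enabled; the chunk sizes below are arbitrary.
 *
 *	// A 4096-byte payload arrives as 1000 bytes followed by 3096 bytes.
 *	spdk_dif_generate_stream(iovs, iovcnt, 0, 1000, &dif_ctx);
 *	spdk_dif_generate_stream(iovs, iovcnt, 1000, 3096, &dif_ctx);
 *
 *	// Each call inserts DIF only for the data blocks it completes; the running
 *	// guard of a partially filled block is carried in dif_ctx.last_guard.
 *	// The receiving side can check the same ranges with spdk_dif_verify_stream().
 */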
int
spdk_dif_update_crc32c_stream(struct iovec *iovs, int iovcnt,
			      uint32_t data_offset, uint32_t data_len,
			      uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t buf_len = 0, buf_offset = 0, len, offset_in_block;
	uint32_t crc32c;
	struct _dif_sgl sgl;
	int rc;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	crc32c = *_crc32c;
	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;

		crc32c = _dif_update_crc32c_split(&sgl, offset_in_block, len, crc32c, ctx);

		buf_len -= len;
		buf_offset += len;
	}

	*_crc32c = crc32c;

	return 0;
}

void
spdk_dif_get_range_with_md(uint32_t data_offset, uint32_t data_len,
			   uint32_t *_buf_offset, uint32_t *_buf_len,
			   const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_offset, buf_len;

	if (!ctx->md_interleave) {
		buf_offset = data_offset;
		buf_len = data_len;
	} else {
		data_block_size = ctx->block_size - ctx->md_size;

		data_unalign = data_offset % data_block_size;

		buf_offset = _to_size_with_md(data_offset, data_block_size, ctx->block_size);
		buf_len = _to_size_with_md(data_unalign + data_len, data_block_size, ctx->block_size) -
			  data_unalign;
	}

	if (_buf_offset != NULL) {
		*_buf_offset = buf_offset;
	}

	if (_buf_len != NULL) {
		*_buf_len = buf_len;
	}
}

uint32_t
spdk_dif_get_length_with_md(uint32_t data_len, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size;

	if (!ctx->md_interleave) {
		return data_len;
	} else {
		data_block_size = ctx->block_size - ctx->md_size;

		return _to_size_with_md(data_len, data_block_size, ctx->block_size);
	}
}
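/* Worked example (illustrative): with ctx->block_size = 520 and ctx->md_size = 8,
 * data_block_size is 512, so
 *
 *	spdk_dif_get_length_with_md(1024, ctx) == (1024 / 512) * 520 + 0   == 1040
 *	spdk_dif_get_length_with_md(1000, ctx) == (1000 / 512) * 520 + 488 == 1008
 *
 * i.e. metadata is accounted only for data blocks that are fully covered; a
 * trailing partial block contributes just its data bytes.
 */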
static int
_dif_remap_ref_tag(struct _dif_sgl *sgl, uint32_t offset_blocks,
		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk,
		   bool check_ref_tag)
{
	uint32_t offset, buf_len;
	uint64_t expected = 0, remapped;
	uint8_t *buf;
	struct _dif_sgl tmp_sgl;
	struct spdk_dif dif;

	/* Fast forward to DIF field. */
	_dif_sgl_advance(sgl, ctx->guard_interval);
	_dif_sgl_copy(&tmp_sgl, sgl);

	/* Copy the split DIF field to the temporary DIF buffer */
	offset = 0;
	while (offset < _dif_size(ctx->dif_pi_format)) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset);

		memcpy((uint8_t *)&dif + offset, buf, buf_len);

		_dif_sgl_advance(sgl, buf_len);
		offset += buf_len;
	}

	if (_dif_ignore(&dif, ctx)) {
		goto end;
	}

	/* For type 1 and 2, the Reference Tag is incremented for each
	 * subsequent logical block. For type 3, the Reference Tag
	 * remains the same as the initial Reference Tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		remapped = ctx->remapped_init_ref_tag;
	}

	/* Verify the stored Reference Tag. */
	if (check_ref_tag && !_dif_reftag_check(&dif, ctx, expected, offset_blocks, err_blk)) {
		return -1;
	}

	/* Update the stored Reference Tag to the remapped one. */
	_dif_set_reftag(&dif, remapped, ctx->dif_pi_format);

	offset = 0;
	while (offset < _dif_size(ctx->dif_pi_format)) {
		_dif_sgl_get_buf(&tmp_sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset);

		memcpy(buf, (uint8_t *)&dif + offset, buf_len);

		_dif_sgl_advance(&tmp_sgl, buf_len);
		offset += buf_len;
	}

end:
	_dif_sgl_advance(sgl, ctx->block_size - ctx->guard_interval - _dif_size(ctx->dif_pi_format));

	return 0;
}

int
spdk_dif_remap_ref_tag(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk,
		       bool check_ref_tag)
{
	struct _dif_sgl sgl;
	uint32_t offset_blocks;
	int rc;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
		return 0;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_remap_ref_tag(&sgl, offset_blocks, ctx, err_blk, check_ref_tag);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}
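/* Usage sketch (illustrative only, not compiled): remap the in-buffer Reference
 * Tags when a protected payload is re-submitted against a different starting LBA.
 * Assumes dif_ctx was initialized with the original init_ref_tag and that the new
 * expected value was set with spdk_dif_ctx_set_remapped_init_ref_tag().
 *
 *	struct spdk_dif_error err_blk;
 *
 *	spdk_dif_ctx_set_remapped_init_ref_tag(&dif_ctx, new_start_lba);
 *	rc = spdk_dif_remap_ref_tag(iovs, iovcnt, num_blocks, &dif_ctx, &err_blk, true);
 *	// rc != 0 means a stored Reference Tag did not match the pre-remap
 *	// expectation; err_blk describes the failing block.
 */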
static int
_dix_remap_ref_tag(struct _dif_sgl *md_sgl, uint32_t offset_blocks,
		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk,
		   bool check_ref_tag)
{
	uint64_t expected = 0, remapped;
	uint8_t *md_buf;
	struct spdk_dif *dif;

	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

	dif = (struct spdk_dif *)(md_buf + ctx->guard_interval);

	if (_dif_ignore(dif, ctx)) {
		goto end;
	}

	/* For type 1 and 2, the Reference Tag is incremented for each
	 * subsequent logical block. For type 3, the Reference Tag
	 * remains the same as the initial Reference Tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		remapped = ctx->remapped_init_ref_tag;
	}

	/* Verify the stored Reference Tag. */
	if (check_ref_tag && !_dif_reftag_check(dif, ctx, expected, offset_blocks, err_blk)) {
		return -1;
	}

	/* Update the stored Reference Tag to the remapped one. */
	_dif_set_reftag(dif, remapped, ctx->dif_pi_format);

end:
	_dif_sgl_advance(md_sgl, ctx->md_size);

	return 0;
}

int
spdk_dix_remap_ref_tag(struct iovec *md_iov, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk,
		       bool check_ref_tag)
{
	struct _dif_sgl md_sgl;
	uint32_t offset_blocks;
	int rc;

	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of metadata iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
		return 0;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dix_remap_ref_tag(&md_sgl, offset_blocks, ctx, err_blk, check_ref_tag);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}
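/* Usage sketch (illustrative only, not compiled): the DIX variant remaps the
 * Reference Tags stored in the separate metadata buffer, so only md_iov is
 * touched; the data iovecs are not needed here. Assumes the same dif_ctx setup as
 * for spdk_dif_remap_ref_tag(), including spdk_dif_ctx_set_remapped_init_ref_tag().
 *
 *	struct spdk_dif_error err_blk;
 *
 *	rc = spdk_dix_remap_ref_tag(&md_iov, num_blocks, &dif_ctx, &err_blk, true);
 *	if (rc != 0) {
 *		// A stored Reference Tag did not match its expected pre-remap value.
 *	}
 */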