/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/dif.h"
#include "spdk/crc16.h"
#include "spdk/crc32.h"
#include "spdk/crc64.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/util.h"

#define REFTAG_MASK_16 0x00000000FFFFFFFF
#define REFTAG_MASK_32 0xFFFFFFFFFFFFFFFF
#define REFTAG_MASK_64 0x0000FFFFFFFFFFFF

/* The variable-size Storage Tag and Reference Tag are not supported yet,
 * so the maximum size of the Reference Tag is assumed.
 */
struct spdk_dif {
	union {
		struct {
			uint16_t guard;
			uint16_t app_tag;
			uint32_t stor_ref_space;
		} g16;
		struct {
			uint32_t guard;
			uint16_t app_tag;
			uint16_t stor_ref_space_p1;
			uint64_t stor_ref_space_p2;
		} g32;
		struct {
			uint64_t guard;
			uint16_t app_tag;
			uint16_t stor_ref_space_p1;
			uint32_t stor_ref_space_p2;
		} g64;
	};
};
SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g16) == 8, "Incorrect size");
SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g32) == 16, "Incorrect size");
SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g64) == 16, "Incorrect size");

/* Context to iterate over or to create an iovec array.
 * Each SGL is either iterated over or created, one at a time.
 */
struct _dif_sgl {
	/* Current iovec in the iteration or creation */
	struct iovec *iov;

	/* Remaining count of iovecs in the iteration or creation. */
	int iovcnt;

	/* Current offset in the iovec */
	uint32_t iov_offset;

	/* Size of the created iovec array in bytes */
	uint32_t total_size;
};

static inline void
_dif_sgl_init(struct _dif_sgl *s, struct iovec *iovs, int iovcnt)
{
	s->iov = iovs;
	s->iovcnt = iovcnt;
	s->iov_offset = 0;
	s->total_size = 0;
}

static void
_dif_sgl_advance(struct _dif_sgl *s, uint32_t step)
{
	s->iov_offset += step;
	while (s->iovcnt != 0) {
		if (s->iov_offset < s->iov->iov_len) {
			break;
		}

		s->iov_offset -= s->iov->iov_len;
		s->iov++;
		s->iovcnt--;
	}
}

static inline void
_dif_sgl_get_buf(struct _dif_sgl *s, uint8_t **_buf, uint32_t *_buf_len)
{
	if (_buf != NULL) {
		*_buf = (uint8_t *)s->iov->iov_base + s->iov_offset;
	}
	if (_buf_len != NULL) {
		*_buf_len = s->iov->iov_len - s->iov_offset;
	}
}

static inline bool
_dif_sgl_append(struct _dif_sgl *s, uint8_t *data, uint32_t data_len)
{
	assert(s->iovcnt > 0);
	s->iov->iov_base = data;
	s->iov->iov_len = data_len;
	s->total_size += data_len;
	s->iov++;
	s->iovcnt--;

	if (s->iovcnt > 0) {
		return true;
	} else {
		return false;
	}
}

static inline bool
_dif_sgl_append_split(struct _dif_sgl *dst, struct _dif_sgl *src, uint32_t data_len)
{
	uint8_t *buf;
	uint32_t buf_len;

	while (data_len != 0) {
		_dif_sgl_get_buf(src, &buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);

		if (!_dif_sgl_append(dst, buf, buf_len)) {
			return false;
		}

		_dif_sgl_advance(src, buf_len);
		data_len -= buf_len;
	}

	return true;
}

/* This function must be used before starting iteration.
*/ 137 static bool 138 _dif_sgl_is_bytes_multiple(struct _dif_sgl *s, uint32_t bytes) 139 { 140 int i; 141 142 for (i = 0; i < s->iovcnt; i++) { 143 if (s->iov[i].iov_len % bytes) { 144 return false; 145 } 146 } 147 148 return true; 149 } 150 151 /* This function must be used before starting iteration. */ 152 static bool 153 _dif_sgl_is_valid(struct _dif_sgl *s, uint32_t bytes) 154 { 155 uint64_t total = 0; 156 int i; 157 158 for (i = 0; i < s->iovcnt; i++) { 159 total += s->iov[i].iov_len; 160 } 161 162 return total >= bytes; 163 } 164 165 static void 166 _dif_sgl_copy(struct _dif_sgl *to, struct _dif_sgl *from) 167 { 168 memcpy(to, from, sizeof(struct _dif_sgl)); 169 } 170 171 static bool 172 _dif_is_disabled(enum spdk_dif_type dif_type) 173 { 174 if (dif_type == SPDK_DIF_DISABLE) { 175 return true; 176 } else { 177 return false; 178 } 179 } 180 181 static inline size_t 182 _dif_size(enum spdk_dif_pi_format dif_pi_format) 183 { 184 uint8_t size; 185 186 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 187 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16); 188 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 189 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32); 190 } else { 191 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64); 192 } 193 194 return size; 195 } 196 197 static uint32_t 198 _get_guard_interval(uint32_t block_size, uint32_t md_size, bool dif_loc, bool md_interleave, 199 size_t dif_size) 200 { 201 if (!dif_loc) { 202 /* For metadata formats with more than 8/16 bytes (depending on 203 * the PI format), if the DIF is contained in the last 8/16 bytes 204 * of metadata, then the CRC covers all metadata up to but excluding 205 * these last 8/16 bytes. 206 */ 207 if (md_interleave) { 208 return block_size - dif_size; 209 } else { 210 return md_size - dif_size; 211 } 212 } else { 213 /* For metadata formats with more than 8/16 bytes (depending on 214 * the PI format), if the DIF is contained in the first 8/16 bytes 215 * of metadata, then the CRC does not cover any metadata. 
216 */ 217 if (md_interleave) { 218 return block_size - md_size; 219 } else { 220 return 0; 221 } 222 } 223 } 224 225 static inline uint8_t 226 _dif_guard_size(enum spdk_dif_pi_format dif_pi_format) 227 { 228 uint8_t size; 229 230 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 231 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.guard); 232 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 233 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.guard); 234 } else { 235 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.guard); 236 } 237 238 return size; 239 } 240 241 static inline void 242 _dif_set_guard(struct spdk_dif *dif, uint64_t guard, enum spdk_dif_pi_format dif_pi_format) 243 { 244 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 245 to_be16(&(dif->g16.guard), (uint16_t)guard); 246 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 247 to_be32(&(dif->g32.guard), (uint32_t)guard); 248 } else { 249 to_be64(&(dif->g64.guard), guard); 250 } 251 } 252 253 static inline uint64_t 254 _dif_get_guard(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 255 { 256 uint64_t guard; 257 258 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 259 guard = (uint64_t)from_be16(&(dif->g16.guard)); 260 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 261 guard = (uint64_t)from_be32(&(dif->g32.guard)); 262 } else { 263 guard = from_be64(&(dif->g64.guard)); 264 } 265 266 return guard; 267 } 268 269 static inline uint64_t 270 _dif_generate_guard(uint64_t guard_seed, void *buf, size_t buf_len, 271 enum spdk_dif_pi_format dif_pi_format) 272 { 273 uint64_t guard; 274 275 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 276 guard = (uint64_t)spdk_crc16_t10dif((uint16_t)guard_seed, buf, buf_len); 277 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 278 guard = (uint64_t)spdk_crc32c_nvme(buf, buf_len, guard_seed); 279 } else { 280 guard = spdk_crc64_nvme(buf, buf_len, guard_seed); 281 } 282 283 return guard; 284 } 285 286 static uint64_t 287 dif_generate_guard_split(uint64_t guard_seed, struct _dif_sgl *sgl, uint32_t start, 288 uint32_t len, const struct spdk_dif_ctx *ctx) 289 { 290 uint64_t guard = guard_seed; 291 uint32_t offset, end, buf_len; 292 uint8_t *buf; 293 294 offset = start; 295 end = start + spdk_min(len, ctx->guard_interval - start); 296 297 while (offset < end) { 298 _dif_sgl_get_buf(sgl, &buf, &buf_len); 299 buf_len = spdk_min(buf_len, end - offset); 300 301 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 302 guard = _dif_generate_guard(guard, buf, buf_len, ctx->dif_pi_format); 303 } 304 305 _dif_sgl_advance(sgl, buf_len); 306 offset += buf_len; 307 } 308 309 return guard; 310 } 311 312 static inline uint64_t 313 _dif_generate_guard_copy(uint64_t guard_seed, void *dst, void *src, size_t buf_len, 314 enum spdk_dif_pi_format dif_pi_format) 315 { 316 uint64_t guard; 317 318 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 319 guard = (uint64_t)spdk_crc16_t10dif_copy((uint16_t)guard_seed, dst, src, buf_len); 320 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 321 memcpy(dst, src, buf_len); 322 guard = (uint64_t)spdk_crc32c_nvme(src, buf_len, guard_seed); 323 } else { 324 memcpy(dst, src, buf_len); 325 guard = spdk_crc64_nvme(src, buf_len, guard_seed); 326 } 327 328 return guard; 329 } 330 331 static uint64_t 332 _dif_generate_guard_copy_split(uint64_t guard, struct _dif_sgl *dst_sgl, 333 struct _dif_sgl *src_sgl, uint32_t data_len, 334 enum spdk_dif_pi_format dif_pi_format) 335 { 336 uint32_t offset = 0, src_len, dst_len, buf_len; 337 uint8_t *src, *dst; 338 339 while (offset < data_len) { 340 
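		/* Descriptive note: each pass of this loop handles the largest chunk
		 * that is contiguous in both the source and the destination iovec,
		 * copying it and folding the copied bytes into the running guard.
		 */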
_dif_sgl_get_buf(src_sgl, &src, &src_len); 341 _dif_sgl_get_buf(dst_sgl, &dst, &dst_len); 342 buf_len = spdk_min(src_len, dst_len); 343 buf_len = spdk_min(buf_len, data_len - offset); 344 345 guard = _dif_generate_guard_copy(guard, dst, src, buf_len, dif_pi_format); 346 347 _dif_sgl_advance(src_sgl, buf_len); 348 _dif_sgl_advance(dst_sgl, buf_len); 349 offset += buf_len; 350 } 351 352 return guard; 353 } 354 355 static void 356 _data_copy_split(struct _dif_sgl *dst_sgl, struct _dif_sgl *src_sgl, uint32_t data_len) 357 { 358 uint32_t offset = 0, src_len, dst_len, buf_len; 359 uint8_t *src, *dst; 360 361 while (offset < data_len) { 362 _dif_sgl_get_buf(src_sgl, &src, &src_len); 363 _dif_sgl_get_buf(dst_sgl, &dst, &dst_len); 364 buf_len = spdk_min(src_len, dst_len); 365 buf_len = spdk_min(buf_len, data_len - offset); 366 367 memcpy(dst, src, buf_len); 368 369 _dif_sgl_advance(src_sgl, buf_len); 370 _dif_sgl_advance(dst_sgl, buf_len); 371 offset += buf_len; 372 } 373 } 374 375 static inline uint8_t 376 _dif_apptag_offset(enum spdk_dif_pi_format dif_pi_format) 377 { 378 return _dif_guard_size(dif_pi_format); 379 } 380 381 static inline uint8_t 382 _dif_apptag_size(void) 383 { 384 return SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.app_tag); 385 } 386 387 static inline void 388 _dif_set_apptag(struct spdk_dif *dif, uint16_t app_tag, enum spdk_dif_pi_format dif_pi_format) 389 { 390 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 391 to_be16(&(dif->g16.app_tag), app_tag); 392 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 393 to_be16(&(dif->g32.app_tag), app_tag); 394 } else { 395 to_be16(&(dif->g64.app_tag), app_tag); 396 } 397 } 398 399 static inline uint16_t 400 _dif_get_apptag(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 401 { 402 uint16_t app_tag; 403 404 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 405 app_tag = from_be16(&(dif->g16.app_tag)); 406 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 407 app_tag = from_be16(&(dif->g32.app_tag)); 408 } else { 409 app_tag = from_be16(&(dif->g64.app_tag)); 410 } 411 412 return app_tag; 413 } 414 415 static inline bool 416 _dif_apptag_ignore(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 417 { 418 return _dif_get_apptag(dif, dif_pi_format) == SPDK_DIF_APPTAG_IGNORE; 419 } 420 421 static inline uint8_t 422 _dif_reftag_offset(enum spdk_dif_pi_format dif_pi_format) 423 { 424 uint8_t offset; 425 426 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 427 offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size(); 428 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 429 offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size() 430 + SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.stor_ref_space_p1); 431 } else { 432 offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size(); 433 } 434 435 return offset; 436 } 437 438 static inline uint8_t 439 _dif_reftag_size(enum spdk_dif_pi_format dif_pi_format) 440 { 441 uint8_t size; 442 443 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 444 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.stor_ref_space); 445 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 446 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.stor_ref_space_p2); 447 } else { 448 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.stor_ref_space_p1) + 449 SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.stor_ref_space_p2); 450 } 451 452 return size; 453 } 454 455 static inline void 456 _dif_set_reftag(struct spdk_dif *dif, uint64_t ref_tag, enum spdk_dif_pi_format dif_pi_format) 457 { 458 if (dif_pi_format == 
SPDK_DIF_PI_FORMAT_16) { 459 to_be32(&(dif->g16.stor_ref_space), (uint32_t)ref_tag); 460 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 461 to_be64(&(dif->g32.stor_ref_space_p2), ref_tag); 462 } else { 463 to_be16(&(dif->g64.stor_ref_space_p1), (uint16_t)(ref_tag >> 32)); 464 to_be32(&(dif->g64.stor_ref_space_p2), (uint32_t)ref_tag); 465 } 466 } 467 468 static inline uint64_t 469 _dif_get_reftag(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 470 { 471 uint64_t ref_tag; 472 473 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 474 ref_tag = (uint64_t)from_be32(&(dif->g16.stor_ref_space)); 475 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 476 ref_tag = from_be64(&(dif->g32.stor_ref_space_p2)); 477 } else { 478 ref_tag = (uint64_t)from_be16(&(dif->g64.stor_ref_space_p1)); 479 ref_tag <<= 32; 480 ref_tag |= (uint64_t)from_be32(&(dif->g64.stor_ref_space_p2)); 481 } 482 483 return ref_tag; 484 } 485 486 static inline bool 487 _dif_reftag_match(struct spdk_dif *dif, uint64_t ref_tag, 488 enum spdk_dif_pi_format dif_pi_format) 489 { 490 uint64_t _ref_tag; 491 bool match; 492 493 _ref_tag = _dif_get_reftag(dif, dif_pi_format); 494 495 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 496 match = (_ref_tag == (ref_tag & REFTAG_MASK_16)); 497 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 498 match = (_ref_tag == ref_tag); 499 } else { 500 match = (_ref_tag == (ref_tag & REFTAG_MASK_64)); 501 } 502 503 return match; 504 } 505 506 static inline bool 507 _dif_reftag_ignore(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 508 { 509 return _dif_reftag_match(dif, REFTAG_MASK_32, dif_pi_format); 510 } 511 512 static bool 513 _dif_ignore(struct spdk_dif *dif, const struct spdk_dif_ctx *ctx) 514 { 515 switch (ctx->dif_type) { 516 case SPDK_DIF_TYPE1: 517 case SPDK_DIF_TYPE2: 518 /* If Type 1 or 2 is used, then all DIF checks are disabled when 519 * the Application Tag is 0xFFFF. 520 */ 521 if (_dif_apptag_ignore(dif, ctx->dif_pi_format)) { 522 return true; 523 } 524 break; 525 case SPDK_DIF_TYPE3: 526 /* If Type 3 is used, then all DIF checks are disabled when the 527 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF 528 * or 0xFFFFFFFFFFFFFFFF depending on the PI format. 
529 */ 530 531 if (_dif_apptag_ignore(dif, ctx->dif_pi_format) && 532 _dif_reftag_ignore(dif, ctx->dif_pi_format)) { 533 return true; 534 } 535 break; 536 default: 537 break; 538 } 539 540 return false; 541 } 542 543 static bool 544 _dif_pi_format_is_valid(enum spdk_dif_pi_format dif_pi_format) 545 { 546 switch (dif_pi_format) { 547 case SPDK_DIF_PI_FORMAT_16: 548 case SPDK_DIF_PI_FORMAT_32: 549 case SPDK_DIF_PI_FORMAT_64: 550 return true; 551 default: 552 return false; 553 } 554 } 555 556 static bool 557 _dif_type_is_valid(enum spdk_dif_type dif_type) 558 { 559 switch (dif_type) { 560 case SPDK_DIF_DISABLE: 561 case SPDK_DIF_TYPE1: 562 case SPDK_DIF_TYPE2: 563 case SPDK_DIF_TYPE3: 564 return true; 565 default: 566 return false; 567 } 568 } 569 570 int 571 spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size, 572 bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags, 573 uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag, 574 uint32_t data_offset, uint64_t guard_seed, struct spdk_dif_ctx_init_ext_opts *opts) 575 { 576 uint32_t data_block_size; 577 enum spdk_dif_pi_format dif_pi_format = SPDK_DIF_PI_FORMAT_16; 578 579 if (opts != NULL) { 580 if (!_dif_pi_format_is_valid(opts->dif_pi_format)) { 581 SPDK_ERRLOG("No valid DIF PI format provided.\n"); 582 return -EINVAL; 583 } 584 585 dif_pi_format = opts->dif_pi_format; 586 } 587 588 if (!_dif_type_is_valid(dif_type)) { 589 SPDK_ERRLOG("No valid DIF type was provided.\n"); 590 return -EINVAL; 591 } 592 593 if (md_size < _dif_size(dif_pi_format)) { 594 SPDK_ERRLOG("Metadata size is smaller than DIF size.\n"); 595 return -EINVAL; 596 } 597 598 if (md_interleave) { 599 if (block_size < md_size) { 600 SPDK_ERRLOG("Block size is smaller than DIF size.\n"); 601 return -EINVAL; 602 } 603 data_block_size = block_size - md_size; 604 } else { 605 data_block_size = block_size; 606 } 607 608 if (data_block_size == 0) { 609 SPDK_ERRLOG("Zero data block size is not allowed\n"); 610 return -EINVAL; 611 } 612 613 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 614 if ((data_block_size % 512) != 0) { 615 SPDK_ERRLOG("Data block size should be a multiple of 512B\n"); 616 return -EINVAL; 617 } 618 } else { 619 if ((data_block_size % 4096) != 0) { 620 SPDK_ERRLOG("Data block size should be a multiple of 4kB\n"); 621 return -EINVAL; 622 } 623 } 624 625 ctx->block_size = block_size; 626 ctx->md_size = md_size; 627 ctx->md_interleave = md_interleave; 628 ctx->dif_pi_format = dif_pi_format; 629 ctx->guard_interval = _get_guard_interval(block_size, md_size, dif_loc, md_interleave, 630 _dif_size(ctx->dif_pi_format)); 631 ctx->dif_type = dif_type; 632 ctx->dif_flags = dif_flags; 633 ctx->init_ref_tag = init_ref_tag; 634 ctx->apptag_mask = apptag_mask; 635 ctx->app_tag = app_tag; 636 ctx->data_offset = data_offset; 637 ctx->ref_tag_offset = data_offset / data_block_size; 638 ctx->last_guard = guard_seed; 639 ctx->guard_seed = guard_seed; 640 ctx->remapped_init_ref_tag = 0; 641 642 return 0; 643 } 644 645 void 646 spdk_dif_ctx_set_data_offset(struct spdk_dif_ctx *ctx, uint32_t data_offset) 647 { 648 uint32_t data_block_size; 649 650 if (ctx->md_interleave) { 651 data_block_size = ctx->block_size - ctx->md_size; 652 } else { 653 data_block_size = ctx->block_size; 654 } 655 656 ctx->data_offset = data_offset; 657 ctx->ref_tag_offset = data_offset / data_block_size; 658 } 659 660 void 661 spdk_dif_ctx_set_remapped_init_ref_tag(struct spdk_dif_ctx *ctx, 662 uint32_t remapped_init_ref_tag) 663 { 664 
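	/* Descriptive note: the value is only stored here; it is consumed when the
	 * stored Reference Tags are rewritten (see _dif_remap_ref_tag() later in
	 * this file).
	 */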
ctx->remapped_init_ref_tag = remapped_init_ref_tag; 665 } 666 667 static void 668 _dif_generate(void *_dif, uint64_t guard, uint32_t offset_blocks, 669 const struct spdk_dif_ctx *ctx) 670 { 671 struct spdk_dif *dif = _dif; 672 uint64_t ref_tag; 673 674 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 675 _dif_set_guard(dif, guard, ctx->dif_pi_format); 676 } 677 678 if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) { 679 _dif_set_apptag(dif, ctx->app_tag, ctx->dif_pi_format); 680 } 681 682 if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) { 683 /* For type 1 and 2, the reference tag is incremented for each 684 * subsequent logical block. For type 3, the reference tag 685 * remains the same as the initial reference tag. 686 */ 687 if (ctx->dif_type != SPDK_DIF_TYPE3) { 688 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks; 689 } else { 690 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset; 691 } 692 693 /* Overwrite reference tag if initialization reference tag is SPDK_DIF_REFTAG_IGNORE */ 694 if (ctx->init_ref_tag == SPDK_DIF_REFTAG_IGNORE) { 695 if (ctx->dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 696 ref_tag = REFTAG_MASK_16; 697 } else if (ctx->dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 698 ref_tag = REFTAG_MASK_32; 699 } else { 700 ref_tag = REFTAG_MASK_64; 701 } 702 } 703 704 _dif_set_reftag(dif, ref_tag, ctx->dif_pi_format); 705 } 706 } 707 708 static void 709 dif_generate(struct _dif_sgl *sgl, uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 710 { 711 uint32_t offset_blocks; 712 uint8_t *buf; 713 uint64_t guard = 0; 714 715 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 716 _dif_sgl_get_buf(sgl, &buf, NULL); 717 718 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 719 guard = _dif_generate_guard(ctx->guard_seed, buf, ctx->guard_interval, ctx->dif_pi_format); 720 } 721 722 _dif_generate(buf + ctx->guard_interval, guard, offset_blocks, ctx); 723 724 _dif_sgl_advance(sgl, ctx->block_size); 725 } 726 } 727 728 static void 729 dif_store_split(struct _dif_sgl *sgl, struct spdk_dif *dif, 730 const struct spdk_dif_ctx *ctx) 731 { 732 uint32_t offset = 0, rest_md_len, buf_len; 733 uint8_t *buf; 734 735 rest_md_len = ctx->block_size - ctx->guard_interval; 736 737 while (offset < rest_md_len) { 738 _dif_sgl_get_buf(sgl, &buf, &buf_len); 739 740 if (offset < _dif_size(ctx->dif_pi_format)) { 741 buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset); 742 memcpy(buf, (uint8_t *)dif + offset, buf_len); 743 } else { 744 buf_len = spdk_min(buf_len, rest_md_len - offset); 745 } 746 747 _dif_sgl_advance(sgl, buf_len); 748 offset += buf_len; 749 } 750 } 751 752 static uint64_t 753 _dif_generate_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len, 754 uint64_t guard, uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 755 { 756 struct spdk_dif dif = {}; 757 758 assert(offset_in_block < ctx->guard_interval); 759 assert(offset_in_block + data_len < ctx->guard_interval || 760 offset_in_block + data_len == ctx->block_size); 761 762 /* Compute CRC over split logical block data. */ 763 guard = dif_generate_guard_split(guard, sgl, offset_in_block, data_len, ctx); 764 765 if (offset_in_block + data_len < ctx->guard_interval) { 766 return guard; 767 } 768 769 /* If a whole logical block data is parsed, generate DIF 770 * and save it to the temporary DIF area. 
771 */ 772 _dif_generate(&dif, guard, offset_blocks, ctx); 773 774 /* Copy generated DIF field to the split DIF field, and then 775 * skip metadata field after DIF field (if any). 776 */ 777 dif_store_split(sgl, &dif, ctx); 778 779 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 780 guard = ctx->guard_seed; 781 } 782 783 return guard; 784 } 785 786 static void 787 dif_generate_split(struct _dif_sgl *sgl, uint32_t num_blocks, 788 const struct spdk_dif_ctx *ctx) 789 { 790 uint32_t offset_blocks; 791 uint64_t guard = 0; 792 793 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 794 guard = ctx->guard_seed; 795 } 796 797 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 798 _dif_generate_split(sgl, 0, ctx->block_size, guard, offset_blocks, ctx); 799 } 800 } 801 802 int 803 spdk_dif_generate(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 804 const struct spdk_dif_ctx *ctx) 805 { 806 struct _dif_sgl sgl; 807 808 _dif_sgl_init(&sgl, iovs, iovcnt); 809 810 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 811 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 812 return -EINVAL; 813 } 814 815 if (_dif_is_disabled(ctx->dif_type)) { 816 return 0; 817 } 818 819 if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) { 820 dif_generate(&sgl, num_blocks, ctx); 821 } else { 822 dif_generate_split(&sgl, num_blocks, ctx); 823 } 824 825 return 0; 826 } 827 828 static void 829 _dif_error_set(struct spdk_dif_error *err_blk, uint8_t err_type, 830 uint64_t expected, uint64_t actual, uint32_t err_offset) 831 { 832 if (err_blk) { 833 err_blk->err_type = err_type; 834 err_blk->expected = expected; 835 err_blk->actual = actual; 836 err_blk->err_offset = err_offset; 837 } 838 } 839 840 static bool 841 _dif_reftag_check(struct spdk_dif *dif, const struct spdk_dif_ctx *ctx, 842 uint64_t expected_reftag, uint32_t offset_blocks, struct spdk_dif_error *err_blk) 843 { 844 uint64_t reftag; 845 846 if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) { 847 switch (ctx->dif_type) { 848 case SPDK_DIF_TYPE1: 849 case SPDK_DIF_TYPE2: 850 /* Compare the DIF Reference Tag field to the passed Reference Tag. 851 * The passed Reference Tag will be the least significant 4 bytes 852 * or 8 bytes (depending on the PI format) 853 * of the LBA when Type 1 is used, and application specific value 854 * if Type 2 is used. 855 */ 856 if (!_dif_reftag_match(dif, expected_reftag, ctx->dif_pi_format)) { 857 reftag = _dif_get_reftag(dif, ctx->dif_pi_format); 858 _dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected_reftag, 859 reftag, offset_blocks); 860 SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu64 "," \ 861 " Expected=%lx, Actual=%lx\n", 862 expected_reftag, expected_reftag, reftag); 863 return false; 864 } 865 break; 866 case SPDK_DIF_TYPE3: 867 /* For Type 3, computed Reference Tag remains unchanged. 868 * Hence ignore the Reference Tag field. 869 */ 870 break; 871 default: 872 break; 873 } 874 } 875 876 return true; 877 } 878 879 static int 880 _dif_verify(void *_dif, uint64_t guard, uint32_t offset_blocks, 881 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 882 { 883 struct spdk_dif *dif = _dif; 884 uint64_t _guard; 885 uint16_t _app_tag; 886 uint64_t ref_tag; 887 888 if (_dif_ignore(dif, ctx)) { 889 return 0; 890 } 891 892 /* For type 1 and 2, the reference tag is incremented for each 893 * subsequent logical block. For type 3, the reference tag 894 * remains the same as the initial reference tag. 
895 */ 896 if (ctx->dif_type != SPDK_DIF_TYPE3) { 897 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks; 898 } else { 899 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset; 900 } 901 902 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 903 /* Compare the DIF Guard field to the CRC computed over the logical 904 * block data. 905 */ 906 _guard = _dif_get_guard(dif, ctx->dif_pi_format); 907 if (_guard != guard) { 908 _dif_error_set(err_blk, SPDK_DIF_GUARD_ERROR, _guard, guard, 909 offset_blocks); 910 SPDK_ERRLOG("Failed to compare Guard: LBA=%" PRIu64 "," \ 911 " Expected=%lx, Actual=%lx\n", 912 ref_tag, _guard, guard); 913 return -1; 914 } 915 } 916 917 if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) { 918 /* Compare unmasked bits in the DIF Application Tag field to the 919 * passed Application Tag. 920 */ 921 _app_tag = _dif_get_apptag(dif, ctx->dif_pi_format); 922 if ((_app_tag & ctx->apptag_mask) != (ctx->app_tag & ctx->apptag_mask)) { 923 _dif_error_set(err_blk, SPDK_DIF_APPTAG_ERROR, ctx->app_tag, 924 (_app_tag & ctx->apptag_mask), offset_blocks); 925 SPDK_ERRLOG("Failed to compare App Tag: LBA=%" PRIu64 "," \ 926 " Expected=%x, Actual=%x\n", 927 ref_tag, ctx->app_tag, (_app_tag & ctx->apptag_mask)); 928 return -1; 929 } 930 } 931 932 if (!_dif_reftag_check(dif, ctx, ref_tag, offset_blocks, err_blk)) { 933 return -1; 934 } 935 936 return 0; 937 } 938 939 static int 940 dif_verify(struct _dif_sgl *sgl, uint32_t num_blocks, 941 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 942 { 943 uint32_t offset_blocks; 944 int rc; 945 uint8_t *buf; 946 uint64_t guard = 0; 947 948 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 949 _dif_sgl_get_buf(sgl, &buf, NULL); 950 951 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 952 guard = _dif_generate_guard(ctx->guard_seed, buf, ctx->guard_interval, ctx->dif_pi_format); 953 } 954 955 rc = _dif_verify(buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 956 if (rc != 0) { 957 return rc; 958 } 959 960 _dif_sgl_advance(sgl, ctx->block_size); 961 } 962 963 return 0; 964 } 965 966 static void 967 dif_load_split(struct _dif_sgl *sgl, struct spdk_dif *dif, 968 const struct spdk_dif_ctx *ctx) 969 { 970 uint32_t offset = 0, rest_md_len, buf_len; 971 uint8_t *buf; 972 973 rest_md_len = ctx->block_size - ctx->guard_interval; 974 975 while (offset < rest_md_len) { 976 _dif_sgl_get_buf(sgl, &buf, &buf_len); 977 978 if (offset < _dif_size(ctx->dif_pi_format)) { 979 buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset); 980 memcpy((uint8_t *)dif + offset, buf, buf_len); 981 } else { 982 buf_len = spdk_min(buf_len, rest_md_len - offset); 983 } 984 985 _dif_sgl_advance(sgl, buf_len); 986 offset += buf_len; 987 } 988 } 989 990 static int 991 _dif_verify_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len, 992 uint64_t *_guard, uint32_t offset_blocks, 993 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 994 { 995 uint64_t guard = *_guard; 996 struct spdk_dif dif = {}; 997 int rc; 998 999 assert(_guard != NULL); 1000 assert(offset_in_block < ctx->guard_interval); 1001 assert(offset_in_block + data_len < ctx->guard_interval || 1002 offset_in_block + data_len == ctx->block_size); 1003 1004 guard = dif_generate_guard_split(guard, sgl, offset_in_block, data_len, ctx); 1005 1006 if (offset_in_block + data_len < ctx->guard_interval) { 1007 *_guard = guard; 1008 return 0; 1009 } 1010 1011 dif_load_split(sgl, &dif, ctx); 1012 1013 rc = _dif_verify(&dif, guard, 
offset_blocks, ctx, err_blk); 1014 if (rc != 0) { 1015 return rc; 1016 } 1017 1018 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1019 guard = ctx->guard_seed; 1020 } 1021 1022 *_guard = guard; 1023 return 0; 1024 } 1025 1026 static int 1027 dif_verify_split(struct _dif_sgl *sgl, uint32_t num_blocks, 1028 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 1029 { 1030 uint32_t offset_blocks; 1031 uint64_t guard = 0; 1032 int rc; 1033 1034 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1035 guard = ctx->guard_seed; 1036 } 1037 1038 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1039 rc = _dif_verify_split(sgl, 0, ctx->block_size, &guard, offset_blocks, 1040 ctx, err_blk); 1041 if (rc != 0) { 1042 return rc; 1043 } 1044 } 1045 1046 return 0; 1047 } 1048 1049 int 1050 spdk_dif_verify(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 1051 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 1052 { 1053 struct _dif_sgl sgl; 1054 1055 _dif_sgl_init(&sgl, iovs, iovcnt); 1056 1057 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 1058 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1059 return -EINVAL; 1060 } 1061 1062 if (_dif_is_disabled(ctx->dif_type)) { 1063 return 0; 1064 } 1065 1066 if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) { 1067 return dif_verify(&sgl, num_blocks, ctx, err_blk); 1068 } else { 1069 return dif_verify_split(&sgl, num_blocks, ctx, err_blk); 1070 } 1071 } 1072 1073 static uint32_t 1074 dif_update_crc32c(struct _dif_sgl *sgl, uint32_t num_blocks, 1075 uint32_t crc32c, const struct spdk_dif_ctx *ctx) 1076 { 1077 uint32_t offset_blocks; 1078 uint8_t *buf; 1079 1080 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1081 _dif_sgl_get_buf(sgl, &buf, NULL); 1082 1083 crc32c = spdk_crc32c_update(buf, ctx->block_size - ctx->md_size, crc32c); 1084 1085 _dif_sgl_advance(sgl, ctx->block_size); 1086 } 1087 1088 return crc32c; 1089 } 1090 1091 static uint32_t 1092 _dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len, 1093 uint32_t crc32c, const struct spdk_dif_ctx *ctx) 1094 { 1095 uint32_t data_block_size, buf_len; 1096 uint8_t *buf; 1097 1098 data_block_size = ctx->block_size - ctx->md_size; 1099 1100 assert(offset_in_block + data_len <= ctx->block_size); 1101 1102 while (data_len != 0) { 1103 _dif_sgl_get_buf(sgl, &buf, &buf_len); 1104 buf_len = spdk_min(buf_len, data_len); 1105 1106 if (offset_in_block < data_block_size) { 1107 buf_len = spdk_min(buf_len, data_block_size - offset_in_block); 1108 crc32c = spdk_crc32c_update(buf, buf_len, crc32c); 1109 } 1110 1111 _dif_sgl_advance(sgl, buf_len); 1112 offset_in_block += buf_len; 1113 data_len -= buf_len; 1114 } 1115 1116 return crc32c; 1117 } 1118 1119 static uint32_t 1120 dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t num_blocks, 1121 uint32_t crc32c, const struct spdk_dif_ctx *ctx) 1122 { 1123 uint32_t offset_blocks; 1124 1125 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1126 crc32c = _dif_update_crc32c_split(sgl, 0, ctx->block_size, crc32c, ctx); 1127 } 1128 1129 return crc32c; 1130 } 1131 1132 int 1133 spdk_dif_update_crc32c(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 1134 uint32_t *_crc32c, const struct spdk_dif_ctx *ctx) 1135 { 1136 struct _dif_sgl sgl; 1137 1138 if (_crc32c == NULL) { 1139 return -EINVAL; 1140 } 1141 1142 _dif_sgl_init(&sgl, iovs, iovcnt); 1143 1144 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 1145 
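		/* Descriptive note: the iovec array must cover num_blocks full blocks
		 * of data plus interleaved metadata; fail before touching any buffers.
		 */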
SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1146 return -EINVAL; 1147 } 1148 1149 if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) { 1150 *_crc32c = dif_update_crc32c(&sgl, num_blocks, *_crc32c, ctx); 1151 } else { 1152 *_crc32c = dif_update_crc32c_split(&sgl, num_blocks, *_crc32c, ctx); 1153 } 1154 1155 return 0; 1156 } 1157 1158 static void 1159 _dif_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1160 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 1161 { 1162 uint32_t data_block_size; 1163 uint8_t *src, *dst; 1164 uint64_t guard = 0; 1165 1166 data_block_size = ctx->block_size - ctx->md_size; 1167 1168 _dif_sgl_get_buf(src_sgl, &src, NULL); 1169 _dif_sgl_get_buf(dst_sgl, &dst, NULL); 1170 1171 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1172 guard = _dif_generate_guard_copy(ctx->guard_seed, dst, src, data_block_size, 1173 ctx->dif_pi_format); 1174 guard = _dif_generate_guard(guard, dst + data_block_size, 1175 ctx->guard_interval - data_block_size, ctx->dif_pi_format); 1176 } else { 1177 memcpy(dst, src, data_block_size); 1178 } 1179 1180 _dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx); 1181 1182 _dif_sgl_advance(src_sgl, data_block_size); 1183 _dif_sgl_advance(dst_sgl, ctx->block_size); 1184 } 1185 1186 static void 1187 dif_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1188 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1189 { 1190 uint32_t offset_blocks; 1191 1192 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1193 _dif_insert_copy(src_sgl, dst_sgl, offset_blocks, ctx); 1194 } 1195 } 1196 1197 static void 1198 _dif_insert_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1199 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 1200 { 1201 uint32_t data_block_size; 1202 uint64_t guard = 0; 1203 struct spdk_dif dif = {}; 1204 1205 data_block_size = ctx->block_size - ctx->md_size; 1206 1207 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1208 guard = _dif_generate_guard_copy_split(ctx->guard_seed, dst_sgl, src_sgl, 1209 data_block_size, ctx->dif_pi_format); 1210 guard = dif_generate_guard_split(guard, dst_sgl, data_block_size, 1211 ctx->guard_interval - data_block_size, ctx); 1212 } else { 1213 _data_copy_split(dst_sgl, src_sgl, data_block_size); 1214 _dif_sgl_advance(dst_sgl, ctx->guard_interval - data_block_size); 1215 } 1216 1217 _dif_generate(&dif, guard, offset_blocks, ctx); 1218 1219 dif_store_split(dst_sgl, &dif, ctx); 1220 } 1221 1222 static void 1223 dif_insert_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1224 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1225 { 1226 uint32_t offset_blocks; 1227 1228 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1229 _dif_insert_copy_split(src_sgl, dst_sgl, offset_blocks, ctx); 1230 } 1231 } 1232 1233 static void 1234 _dif_disable_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1235 const struct spdk_dif_ctx *ctx) 1236 { 1237 uint32_t offset = 0, src_len, dst_len, buf_len, data_block_size; 1238 uint8_t *src, *dst; 1239 1240 data_block_size = ctx->block_size - ctx->md_size; 1241 1242 while (offset < data_block_size) { 1243 _dif_sgl_get_buf(src_sgl, &src, &src_len); 1244 _dif_sgl_get_buf(dst_sgl, &dst, &dst_len); 1245 buf_len = spdk_min(src_len, dst_len); 1246 buf_len = spdk_min(buf_len, data_block_size - offset); 1247 1248 memcpy(dst, src, buf_len); 1249 1250 _dif_sgl_advance(src_sgl, buf_len); 1251 _dif_sgl_advance(dst_sgl, buf_len); 1252 offset += 
buf_len; 1253 } 1254 1255 _dif_sgl_advance(dst_sgl, ctx->md_size); 1256 } 1257 1258 static void 1259 dif_disable_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1260 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1261 { 1262 uint32_t offset_blocks; 1263 1264 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1265 _dif_disable_insert_copy(src_sgl, dst_sgl, ctx); 1266 } 1267 } 1268 1269 static int 1270 _spdk_dif_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1271 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1272 { 1273 uint32_t data_block_size; 1274 1275 data_block_size = ctx->block_size - ctx->md_size; 1276 1277 if (!_dif_sgl_is_valid(src_sgl, data_block_size * num_blocks) || 1278 !_dif_sgl_is_valid(dst_sgl, ctx->block_size * num_blocks)) { 1279 SPDK_ERRLOG("Size of iovec arrays are not valid.\n"); 1280 return -EINVAL; 1281 } 1282 1283 if (_dif_is_disabled(ctx->dif_type)) { 1284 dif_disable_insert_copy(src_sgl, dst_sgl, num_blocks, ctx); 1285 return 0; 1286 } 1287 1288 if (_dif_sgl_is_bytes_multiple(src_sgl, data_block_size) && 1289 _dif_sgl_is_bytes_multiple(dst_sgl, ctx->block_size)) { 1290 dif_insert_copy(src_sgl, dst_sgl, num_blocks, ctx); 1291 } else { 1292 dif_insert_copy_split(src_sgl, dst_sgl, num_blocks, ctx); 1293 } 1294 1295 return 0; 1296 } 1297 1298 int 1299 spdk_dif_generate_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs, 1300 int bounce_iovcnt, uint32_t num_blocks, 1301 const struct spdk_dif_ctx *ctx) 1302 { 1303 struct _dif_sgl src_sgl, dst_sgl; 1304 1305 _dif_sgl_init(&src_sgl, iovs, iovcnt); 1306 _dif_sgl_init(&dst_sgl, bounce_iovs, bounce_iovcnt); 1307 1308 return _spdk_dif_insert_copy(&src_sgl, &dst_sgl, num_blocks, ctx); 1309 } 1310 1311 static int 1312 _dif_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1313 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx, 1314 struct spdk_dif_error *err_blk) 1315 { 1316 uint32_t data_block_size; 1317 uint8_t *src, *dst; 1318 int rc; 1319 uint64_t guard = 0; 1320 1321 data_block_size = ctx->block_size - ctx->md_size; 1322 1323 _dif_sgl_get_buf(src_sgl, &src, NULL); 1324 _dif_sgl_get_buf(dst_sgl, &dst, NULL); 1325 1326 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1327 guard = _dif_generate_guard_copy(ctx->guard_seed, dst, src, data_block_size, 1328 ctx->dif_pi_format); 1329 guard = _dif_generate_guard(guard, src + data_block_size, 1330 ctx->guard_interval - data_block_size, ctx->dif_pi_format); 1331 } else { 1332 memcpy(dst, src, data_block_size); 1333 } 1334 1335 rc = _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1336 if (rc != 0) { 1337 return rc; 1338 } 1339 1340 _dif_sgl_advance(src_sgl, ctx->block_size); 1341 _dif_sgl_advance(dst_sgl, data_block_size); 1342 1343 return 0; 1344 } 1345 1346 static int 1347 dif_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1348 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1349 struct spdk_dif_error *err_blk) 1350 { 1351 uint32_t offset_blocks; 1352 int rc; 1353 1354 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1355 rc = _dif_strip_copy(src_sgl, dst_sgl, offset_blocks, ctx, err_blk); 1356 if (rc != 0) { 1357 return rc; 1358 } 1359 } 1360 1361 return 0; 1362 } 1363 1364 static int 1365 _dif_strip_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1366 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx, 1367 struct spdk_dif_error *err_blk) 1368 { 1369 uint32_t data_block_size; 1370 uint64_t guard = 
0; 1371 struct spdk_dif dif = {}; 1372 1373 data_block_size = ctx->block_size - ctx->md_size; 1374 1375 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1376 guard = _dif_generate_guard_copy_split(ctx->guard_seed, dst_sgl, src_sgl, 1377 data_block_size, ctx->dif_pi_format); 1378 guard = dif_generate_guard_split(guard, src_sgl, data_block_size, 1379 ctx->guard_interval - data_block_size, ctx); 1380 } else { 1381 _data_copy_split(dst_sgl, src_sgl, data_block_size); 1382 _dif_sgl_advance(src_sgl, ctx->guard_interval - data_block_size); 1383 } 1384 1385 dif_load_split(src_sgl, &dif, ctx); 1386 1387 return _dif_verify(&dif, guard, offset_blocks, ctx, err_blk); 1388 } 1389 1390 static int 1391 dif_strip_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1392 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1393 struct spdk_dif_error *err_blk) 1394 { 1395 uint32_t offset_blocks; 1396 int rc; 1397 1398 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1399 rc = _dif_strip_copy_split(src_sgl, dst_sgl, offset_blocks, ctx, err_blk); 1400 if (rc != 0) { 1401 return rc; 1402 } 1403 } 1404 1405 return 0; 1406 } 1407 1408 static void 1409 _dif_disable_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1410 const struct spdk_dif_ctx *ctx) 1411 { 1412 uint32_t offset = 0, src_len, dst_len, buf_len, data_block_size; 1413 uint8_t *src, *dst; 1414 1415 data_block_size = ctx->block_size - ctx->md_size; 1416 1417 while (offset < data_block_size) { 1418 _dif_sgl_get_buf(src_sgl, &src, &src_len); 1419 _dif_sgl_get_buf(dst_sgl, &dst, &dst_len); 1420 buf_len = spdk_min(src_len, dst_len); 1421 buf_len = spdk_min(buf_len, data_block_size - offset); 1422 1423 memcpy(dst, src, buf_len); 1424 1425 _dif_sgl_advance(src_sgl, buf_len); 1426 _dif_sgl_advance(dst_sgl, buf_len); 1427 offset += buf_len; 1428 } 1429 1430 _dif_sgl_advance(src_sgl, ctx->md_size); 1431 } 1432 1433 static void 1434 dif_disable_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1435 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1436 { 1437 uint32_t offset_blocks; 1438 1439 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1440 _dif_disable_strip_copy(src_sgl, dst_sgl, ctx); 1441 } 1442 } 1443 1444 static int 1445 _spdk_dif_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1446 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1447 struct spdk_dif_error *err_blk) 1448 { 1449 uint32_t data_block_size; 1450 1451 data_block_size = ctx->block_size - ctx->md_size; 1452 1453 if (!_dif_sgl_is_valid(dst_sgl, data_block_size * num_blocks) || 1454 !_dif_sgl_is_valid(src_sgl, ctx->block_size * num_blocks)) { 1455 SPDK_ERRLOG("Size of iovec arrays are not valid\n"); 1456 return -EINVAL; 1457 } 1458 1459 if (_dif_is_disabled(ctx->dif_type)) { 1460 dif_disable_strip_copy(src_sgl, dst_sgl, num_blocks, ctx); 1461 return 0; 1462 } 1463 1464 if (_dif_sgl_is_bytes_multiple(dst_sgl, data_block_size) && 1465 _dif_sgl_is_bytes_multiple(src_sgl, ctx->block_size)) { 1466 return dif_strip_copy(src_sgl, dst_sgl, num_blocks, ctx, err_blk); 1467 } else { 1468 return dif_strip_copy_split(src_sgl, dst_sgl, num_blocks, ctx, err_blk); 1469 } 1470 } 1471 1472 int 1473 spdk_dif_verify_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs, 1474 int bounce_iovcnt, uint32_t num_blocks, 1475 const struct spdk_dif_ctx *ctx, 1476 struct spdk_dif_error *err_blk) 1477 { 1478 struct _dif_sgl src_sgl, dst_sgl; 1479 1480 _dif_sgl_init(&src_sgl, bounce_iovs, bounce_iovcnt); 1481 
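	/* Descriptive note: for verify-and-copy the bounce buffer (data with
	 * interleaved DIF) is the source, and the application iovecs receive the
	 * stripped data blocks.
	 */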
_dif_sgl_init(&dst_sgl, iovs, iovcnt); 1482 1483 return _spdk_dif_strip_copy(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk); 1484 } 1485 1486 static void 1487 _bit_flip(uint8_t *buf, uint32_t flip_bit) 1488 { 1489 uint8_t byte; 1490 1491 byte = *buf; 1492 byte ^= 1 << flip_bit; 1493 *buf = byte; 1494 } 1495 1496 static int 1497 _dif_inject_error(struct _dif_sgl *sgl, 1498 uint32_t block_size, uint32_t num_blocks, 1499 uint32_t inject_offset_blocks, 1500 uint32_t inject_offset_bytes, 1501 uint32_t inject_offset_bits) 1502 { 1503 uint32_t offset_in_block, buf_len; 1504 uint8_t *buf; 1505 1506 _dif_sgl_advance(sgl, block_size * inject_offset_blocks); 1507 1508 offset_in_block = 0; 1509 1510 while (offset_in_block < block_size) { 1511 _dif_sgl_get_buf(sgl, &buf, &buf_len); 1512 buf_len = spdk_min(buf_len, block_size - offset_in_block); 1513 1514 if (inject_offset_bytes >= offset_in_block && 1515 inject_offset_bytes < offset_in_block + buf_len) { 1516 buf += inject_offset_bytes - offset_in_block; 1517 _bit_flip(buf, inject_offset_bits); 1518 return 0; 1519 } 1520 1521 _dif_sgl_advance(sgl, buf_len); 1522 offset_in_block += buf_len; 1523 } 1524 1525 return -1; 1526 } 1527 1528 static int 1529 dif_inject_error(struct _dif_sgl *sgl, uint32_t block_size, uint32_t num_blocks, 1530 uint32_t start_inject_bytes, uint32_t inject_range_bytes, 1531 uint32_t *inject_offset) 1532 { 1533 uint32_t inject_offset_blocks, inject_offset_bytes, inject_offset_bits; 1534 uint32_t offset_blocks; 1535 int rc; 1536 1537 srand(time(0)); 1538 1539 inject_offset_blocks = rand() % num_blocks; 1540 inject_offset_bytes = start_inject_bytes + (rand() % inject_range_bytes); 1541 inject_offset_bits = rand() % 8; 1542 1543 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1544 if (offset_blocks == inject_offset_blocks) { 1545 rc = _dif_inject_error(sgl, block_size, num_blocks, 1546 inject_offset_blocks, 1547 inject_offset_bytes, 1548 inject_offset_bits); 1549 if (rc == 0) { 1550 *inject_offset = inject_offset_blocks; 1551 } 1552 return rc; 1553 } 1554 } 1555 1556 return -1; 1557 } 1558 1559 int 1560 spdk_dif_inject_error(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 1561 const struct spdk_dif_ctx *ctx, uint32_t inject_flags, 1562 uint32_t *inject_offset) 1563 { 1564 struct _dif_sgl sgl; 1565 int rc; 1566 1567 _dif_sgl_init(&sgl, iovs, iovcnt); 1568 1569 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 1570 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1571 return -EINVAL; 1572 } 1573 1574 if (inject_flags & SPDK_DIF_REFTAG_ERROR) { 1575 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1576 ctx->guard_interval + _dif_reftag_offset(ctx->dif_pi_format), 1577 _dif_reftag_size(ctx->dif_pi_format), 1578 inject_offset); 1579 if (rc != 0) { 1580 SPDK_ERRLOG("Failed to inject error to Reference Tag.\n"); 1581 return rc; 1582 } 1583 } 1584 1585 if (inject_flags & SPDK_DIF_APPTAG_ERROR) { 1586 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1587 ctx->guard_interval + _dif_apptag_offset(ctx->dif_pi_format), 1588 _dif_apptag_size(), 1589 inject_offset); 1590 if (rc != 0) { 1591 SPDK_ERRLOG("Failed to inject error to Application Tag.\n"); 1592 return rc; 1593 } 1594 } 1595 if (inject_flags & SPDK_DIF_GUARD_ERROR) { 1596 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1597 ctx->guard_interval, 1598 _dif_guard_size(ctx->dif_pi_format), 1599 inject_offset); 1600 if (rc != 0) { 1601 SPDK_ERRLOG("Failed to inject error to Guard.\n"); 1602 return rc; 1603 } 1604 } 1605 1606 if 
(inject_flags & SPDK_DIF_DATA_ERROR) { 1607 /* If the DIF information is contained within the last 8/16 bytes of 1608 * metadata (depending on the PI format), then the CRC covers all metadata 1609 * bytes up to but excluding the last 8/16 bytes. But error injection does not 1610 * cover these metadata because classification is not determined yet. 1611 * 1612 * Note: Error injection to data block is expected to be detected as 1613 * guard error. 1614 */ 1615 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1616 0, 1617 ctx->block_size - ctx->md_size, 1618 inject_offset); 1619 if (rc != 0) { 1620 SPDK_ERRLOG("Failed to inject error to data block.\n"); 1621 return rc; 1622 } 1623 } 1624 1625 return 0; 1626 } 1627 1628 static void 1629 dix_generate(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1630 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1631 { 1632 uint32_t offset_blocks = 0; 1633 uint8_t *data_buf, *md_buf; 1634 uint64_t guard; 1635 1636 while (offset_blocks < num_blocks) { 1637 _dif_sgl_get_buf(data_sgl, &data_buf, NULL); 1638 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1639 1640 guard = 0; 1641 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1642 guard = _dif_generate_guard(ctx->guard_seed, data_buf, ctx->block_size, 1643 ctx->dif_pi_format); 1644 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1645 ctx->dif_pi_format); 1646 } 1647 1648 _dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx); 1649 1650 _dif_sgl_advance(data_sgl, ctx->block_size); 1651 _dif_sgl_advance(md_sgl, ctx->md_size); 1652 offset_blocks++; 1653 } 1654 } 1655 1656 static void 1657 _dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1658 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 1659 { 1660 uint32_t offset_in_block, data_buf_len; 1661 uint8_t *data_buf, *md_buf; 1662 uint64_t guard = 0; 1663 1664 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1665 1666 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1667 guard = ctx->guard_seed; 1668 } 1669 offset_in_block = 0; 1670 1671 while (offset_in_block < ctx->block_size) { 1672 _dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len); 1673 data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block); 1674 1675 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1676 guard = _dif_generate_guard(guard, data_buf, data_buf_len, 1677 ctx->dif_pi_format); 1678 } 1679 1680 _dif_sgl_advance(data_sgl, data_buf_len); 1681 offset_in_block += data_buf_len; 1682 } 1683 1684 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1685 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1686 ctx->dif_pi_format); 1687 } 1688 1689 _dif_sgl_advance(md_sgl, ctx->md_size); 1690 1691 _dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx); 1692 } 1693 1694 static void 1695 dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1696 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1697 { 1698 uint32_t offset_blocks; 1699 1700 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1701 _dix_generate_split(data_sgl, md_sgl, offset_blocks, ctx); 1702 } 1703 } 1704 1705 int 1706 spdk_dix_generate(struct iovec *iovs, int iovcnt, struct iovec *md_iov, 1707 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1708 { 1709 struct _dif_sgl data_sgl, md_sgl; 1710 1711 _dif_sgl_init(&data_sgl, iovs, iovcnt); 1712 _dif_sgl_init(&md_sgl, md_iov, 1); 1713 1714 if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) || 1715 !_dif_sgl_is_valid(&md_sgl, 
ctx->md_size * num_blocks)) { 1716 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1717 return -EINVAL; 1718 } 1719 1720 if (_dif_is_disabled(ctx->dif_type)) { 1721 return 0; 1722 } 1723 1724 if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) { 1725 dix_generate(&data_sgl, &md_sgl, num_blocks, ctx); 1726 } else { 1727 dix_generate_split(&data_sgl, &md_sgl, num_blocks, ctx); 1728 } 1729 1730 return 0; 1731 } 1732 1733 static int 1734 dix_verify(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1735 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1736 struct spdk_dif_error *err_blk) 1737 { 1738 uint32_t offset_blocks = 0; 1739 uint8_t *data_buf, *md_buf; 1740 uint64_t guard; 1741 int rc; 1742 1743 while (offset_blocks < num_blocks) { 1744 _dif_sgl_get_buf(data_sgl, &data_buf, NULL); 1745 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1746 1747 guard = 0; 1748 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1749 guard = _dif_generate_guard(ctx->guard_seed, data_buf, ctx->block_size, 1750 ctx->dif_pi_format); 1751 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1752 ctx->dif_pi_format); 1753 } 1754 1755 rc = _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1756 if (rc != 0) { 1757 return rc; 1758 } 1759 1760 _dif_sgl_advance(data_sgl, ctx->block_size); 1761 _dif_sgl_advance(md_sgl, ctx->md_size); 1762 offset_blocks++; 1763 } 1764 1765 return 0; 1766 } 1767 1768 static int 1769 _dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1770 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx, 1771 struct spdk_dif_error *err_blk) 1772 { 1773 uint32_t offset_in_block, data_buf_len; 1774 uint8_t *data_buf, *md_buf; 1775 uint64_t guard = 0; 1776 1777 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1778 1779 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1780 guard = ctx->guard_seed; 1781 } 1782 offset_in_block = 0; 1783 1784 while (offset_in_block < ctx->block_size) { 1785 _dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len); 1786 data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block); 1787 1788 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1789 guard = _dif_generate_guard(guard, data_buf, data_buf_len, 1790 ctx->dif_pi_format); 1791 } 1792 1793 _dif_sgl_advance(data_sgl, data_buf_len); 1794 offset_in_block += data_buf_len; 1795 } 1796 1797 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1798 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1799 ctx->dif_pi_format); 1800 } 1801 1802 _dif_sgl_advance(md_sgl, ctx->md_size); 1803 1804 return _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1805 } 1806 1807 static int 1808 dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1809 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1810 struct spdk_dif_error *err_blk) 1811 { 1812 uint32_t offset_blocks; 1813 int rc; 1814 1815 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1816 rc = _dix_verify_split(data_sgl, md_sgl, offset_blocks, ctx, err_blk); 1817 if (rc != 0) { 1818 return rc; 1819 } 1820 } 1821 1822 return 0; 1823 } 1824 1825 int 1826 spdk_dix_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov, 1827 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1828 struct spdk_dif_error *err_blk) 1829 { 1830 struct _dif_sgl data_sgl, md_sgl; 1831 1832 if (md_iov->iov_base == NULL) { 1833 SPDK_ERRLOG("Metadata buffer is NULL.\n"); 1834 return -EINVAL; 1835 } 1836 1837 _dif_sgl_init(&data_sgl, iovs, iovcnt); 1838 
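	/* Descriptive note: in DIX the protection information lives in a single
	 * separate metadata buffer, so the metadata SGL wraps exactly one iovec.
	 */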
_dif_sgl_init(&md_sgl, md_iov, 1); 1839 1840 if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) || 1841 !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) { 1842 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1843 return -EINVAL; 1844 } 1845 1846 if (_dif_is_disabled(ctx->dif_type)) { 1847 return 0; 1848 } 1849 1850 if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) { 1851 return dix_verify(&data_sgl, &md_sgl, num_blocks, ctx, err_blk); 1852 } else { 1853 return dix_verify_split(&data_sgl, &md_sgl, num_blocks, ctx, err_blk); 1854 } 1855 } 1856 1857 int 1858 spdk_dix_inject_error(struct iovec *iovs, int iovcnt, struct iovec *md_iov, 1859 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1860 uint32_t inject_flags, uint32_t *inject_offset) 1861 { 1862 struct _dif_sgl data_sgl, md_sgl; 1863 int rc; 1864 1865 _dif_sgl_init(&data_sgl, iovs, iovcnt); 1866 _dif_sgl_init(&md_sgl, md_iov, 1); 1867 1868 if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) || 1869 !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) { 1870 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1871 return -EINVAL; 1872 } 1873 1874 if (inject_flags & SPDK_DIF_REFTAG_ERROR) { 1875 rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks, 1876 ctx->guard_interval + _dif_reftag_offset(ctx->dif_pi_format), 1877 _dif_reftag_size(ctx->dif_pi_format), 1878 inject_offset); 1879 if (rc != 0) { 1880 SPDK_ERRLOG("Failed to inject error to Reference Tag.\n"); 1881 return rc; 1882 } 1883 } 1884 1885 if (inject_flags & SPDK_DIF_APPTAG_ERROR) { 1886 rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks, 1887 ctx->guard_interval + _dif_apptag_offset(ctx->dif_pi_format), 1888 _dif_apptag_size(), 1889 inject_offset); 1890 if (rc != 0) { 1891 SPDK_ERRLOG("Failed to inject error to Application Tag.\n"); 1892 return rc; 1893 } 1894 } 1895 1896 if (inject_flags & SPDK_DIF_GUARD_ERROR) { 1897 rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks, 1898 ctx->guard_interval, 1899 _dif_guard_size(ctx->dif_pi_format), 1900 inject_offset); 1901 if (rc != 0) { 1902 SPDK_ERRLOG("Failed to inject error to Guard.\n"); 1903 return rc; 1904 } 1905 } 1906 1907 if (inject_flags & SPDK_DIF_DATA_ERROR) { 1908 /* Note: Error injection to data block is expected to be detected 1909 * as guard error. 
 */
		rc = dif_inject_error(&data_sgl, ctx->block_size, num_blocks,
				      0,
				      ctx->block_size,
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to data block.\n");
			return rc;
		}
	}

	return 0;
}

static uint32_t
_to_next_boundary(uint32_t offset, uint32_t boundary)
{
	return boundary - (offset % boundary);
}

static uint32_t
_to_size_with_md(uint32_t size, uint32_t data_block_size, uint32_t block_size)
{
	return (size / data_block_size) * block_size + (size % data_block_size);
}

int
spdk_dif_set_md_interleave_iovs(struct iovec *iovs, int iovcnt,
				struct iovec *buf_iovs, int buf_iovcnt,
				uint32_t data_offset, uint32_t data_len,
				uint32_t *_mapped_len,
				const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_len, buf_offset, len;
	struct _dif_sgl dif_sgl;
	struct _dif_sgl buf_sgl;

	if (iovs == NULL || iovcnt == 0 || buf_iovs == NULL || buf_iovcnt == 0) {
		return -EINVAL;
	}

	data_block_size = ctx->block_size - ctx->md_size;

	data_unalign = ctx->data_offset % data_block_size;

	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
				   ctx->block_size);
	buf_len -= data_unalign;

	_dif_sgl_init(&dif_sgl, iovs, iovcnt);
	_dif_sgl_init(&buf_sgl, buf_iovs, buf_iovcnt);

	if (!_dif_sgl_is_valid(&buf_sgl, buf_len)) {
		SPDK_ERRLOG("Buffer overflow will occur.\n");
		return -ERANGE;
	}

	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
	buf_offset -= data_unalign;

	_dif_sgl_advance(&buf_sgl, buf_offset);

	while (data_len != 0) {
		len = spdk_min(data_len, _to_next_boundary(ctx->data_offset + data_offset, data_block_size));
		if (!_dif_sgl_append_split(&dif_sgl, &buf_sgl, len)) {
			break;
		}
		_dif_sgl_advance(&buf_sgl, ctx->md_size);
		data_offset += len;
		data_len -= len;
	}

	if (_mapped_len != NULL) {
		*_mapped_len = dif_sgl.total_size;
	}

	return iovcnt - dif_sgl.iovcnt;
}

static int
_dif_sgl_setup_stream(struct _dif_sgl *sgl, uint32_t *_buf_offset, uint32_t *_buf_len,
		      uint32_t data_offset, uint32_t data_len,
		      const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_len, buf_offset;

	data_block_size = ctx->block_size - ctx->md_size;

	data_unalign = ctx->data_offset % data_block_size;

	/* If the last data block is complete, DIF of the data block is
	 * inserted or verified in this turn.
2002 */ 2003 buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size, 2004 ctx->block_size); 2005 buf_len -= data_unalign; 2006 2007 if (!_dif_sgl_is_valid(sgl, buf_len)) { 2008 return -ERANGE; 2009 } 2010 2011 buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size); 2012 buf_offset -= data_unalign; 2013 2014 _dif_sgl_advance(sgl, buf_offset); 2015 buf_len -= buf_offset; 2016 2017 buf_offset += data_unalign; 2018 2019 *_buf_offset = buf_offset; 2020 *_buf_len = buf_len; 2021 2022 return 0; 2023 } 2024 2025 int 2026 spdk_dif_generate_stream(struct iovec *iovs, int iovcnt, 2027 uint32_t data_offset, uint32_t data_len, 2028 struct spdk_dif_ctx *ctx) 2029 { 2030 uint32_t buf_len = 0, buf_offset = 0; 2031 uint32_t len, offset_in_block, offset_blocks; 2032 uint64_t guard = 0; 2033 struct _dif_sgl sgl; 2034 int rc; 2035 2036 if (iovs == NULL || iovcnt == 0) { 2037 return -EINVAL; 2038 } 2039 2040 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2041 guard = ctx->last_guard; 2042 } 2043 2044 _dif_sgl_init(&sgl, iovs, iovcnt); 2045 2046 rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx); 2047 if (rc != 0) { 2048 return rc; 2049 } 2050 2051 while (buf_len != 0) { 2052 len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size)); 2053 offset_in_block = buf_offset % ctx->block_size; 2054 offset_blocks = buf_offset / ctx->block_size; 2055 2056 guard = _dif_generate_split(&sgl, offset_in_block, len, guard, offset_blocks, ctx); 2057 2058 buf_len -= len; 2059 buf_offset += len; 2060 } 2061 2062 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2063 ctx->last_guard = guard; 2064 } 2065 2066 return 0; 2067 } 2068 2069 int 2070 spdk_dif_verify_stream(struct iovec *iovs, int iovcnt, 2071 uint32_t data_offset, uint32_t data_len, 2072 struct spdk_dif_ctx *ctx, 2073 struct spdk_dif_error *err_blk) 2074 { 2075 uint32_t buf_len = 0, buf_offset = 0; 2076 uint32_t len, offset_in_block, offset_blocks; 2077 uint64_t guard = 0; 2078 struct _dif_sgl sgl; 2079 int rc = 0; 2080 2081 if (iovs == NULL || iovcnt == 0) { 2082 return -EINVAL; 2083 } 2084 2085 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2086 guard = ctx->last_guard; 2087 } 2088 2089 _dif_sgl_init(&sgl, iovs, iovcnt); 2090 2091 rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx); 2092 if (rc != 0) { 2093 return rc; 2094 } 2095 2096 while (buf_len != 0) { 2097 len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size)); 2098 offset_in_block = buf_offset % ctx->block_size; 2099 offset_blocks = buf_offset / ctx->block_size; 2100 2101 rc = _dif_verify_split(&sgl, offset_in_block, len, &guard, offset_blocks, 2102 ctx, err_blk); 2103 if (rc != 0) { 2104 goto error; 2105 } 2106 2107 buf_len -= len; 2108 buf_offset += len; 2109 } 2110 2111 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2112 ctx->last_guard = guard; 2113 } 2114 error: 2115 return rc; 2116 } 2117 2118 int 2119 spdk_dif_update_crc32c_stream(struct iovec *iovs, int iovcnt, 2120 uint32_t data_offset, uint32_t data_len, 2121 uint32_t *_crc32c, const struct spdk_dif_ctx *ctx) 2122 { 2123 uint32_t buf_len = 0, buf_offset = 0, len, offset_in_block; 2124 uint32_t crc32c; 2125 struct _dif_sgl sgl; 2126 int rc; 2127 2128 if (iovs == NULL || iovcnt == 0) { 2129 return -EINVAL; 2130 } 2131 2132 crc32c = *_crc32c; 2133 _dif_sgl_init(&sgl, iovs, iovcnt); 2134 2135 rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx); 2136 if 

int
spdk_dif_update_crc32c_stream(struct iovec *iovs, int iovcnt,
			      uint32_t data_offset, uint32_t data_len,
			      uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t buf_len = 0, buf_offset = 0, len, offset_in_block;
	uint32_t crc32c;
	struct _dif_sgl sgl;
	int rc;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	crc32c = *_crc32c;
	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;

		crc32c = _dif_update_crc32c_split(&sgl, offset_in_block, len, crc32c, ctx);

		buf_len -= len;
		buf_offset += len;
	}

	*_crc32c = crc32c;

	return 0;
}

void
spdk_dif_get_range_with_md(uint32_t data_offset, uint32_t data_len,
			   uint32_t *_buf_offset, uint32_t *_buf_len,
			   const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_offset, buf_len;

	if (!ctx->md_interleave) {
		buf_offset = data_offset;
		buf_len = data_len;
	} else {
		data_block_size = ctx->block_size - ctx->md_size;

		data_unalign = data_offset % data_block_size;

		buf_offset = _to_size_with_md(data_offset, data_block_size, ctx->block_size);
		buf_len = _to_size_with_md(data_unalign + data_len, data_block_size, ctx->block_size) -
			  data_unalign;
	}

	if (_buf_offset != NULL) {
		*_buf_offset = buf_offset;
	}

	if (_buf_len != NULL) {
		*_buf_len = buf_len;
	}
}

uint32_t
spdk_dif_get_length_with_md(uint32_t data_len, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size;

	if (!ctx->md_interleave) {
		return data_len;
	} else {
		data_block_size = ctx->block_size - ctx->md_size;

		return _to_size_with_md(data_len, data_block_size, ctx->block_size);
	}
}
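
/* Worked example (assumed format, for illustration only): with interleaved
 * metadata, block_size = 520 and md_size = 8, so data_block_size = 512.
 * For data_len = 4096:
 *
 *	spdk_dif_get_length_with_md(4096, &dif_ctx)
 *		= (4096 / 512) * 520 + (4096 % 512)
 *		= 8 * 520 + 0
 *		= 4160
 *
 * i.e. 4 KiB of payload occupies 4160 bytes once the per-block metadata is
 * accounted for.
 */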

static int
_dif_remap_ref_tag(struct _dif_sgl *sgl, uint32_t offset_blocks,
		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk,
		   bool check_ref_tag)
{
	uint32_t offset, buf_len;
	uint64_t expected = 0, remapped;
	uint8_t *buf;
	struct _dif_sgl tmp_sgl;
	struct spdk_dif dif;

	/* Fast forward to DIF field. */
	_dif_sgl_advance(sgl, ctx->guard_interval);
	_dif_sgl_copy(&tmp_sgl, sgl);

	/* Copy the split DIF field to the temporary DIF buffer */
	offset = 0;
	while (offset < _dif_size(ctx->dif_pi_format)) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset);

		memcpy((uint8_t *)&dif + offset, buf, buf_len);

		_dif_sgl_advance(sgl, buf_len);
		offset += buf_len;
	}

	if (_dif_ignore(&dif, ctx)) {
		goto end;
	}

	/* For type 1 and 2, the Reference Tag is incremented for each
	 * subsequent logical block. For type 3, the Reference Tag
	 * remains the same as the initial Reference Tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		remapped = ctx->remapped_init_ref_tag;
	}

	/* Verify the stored Reference Tag. */
	if (check_ref_tag && !_dif_reftag_check(&dif, ctx, expected, offset_blocks, err_blk)) {
		return -1;
	}

	/* Update the stored Reference Tag to the remapped one. */
	_dif_set_reftag(&dif, remapped, ctx->dif_pi_format);

	offset = 0;
	while (offset < _dif_size(ctx->dif_pi_format)) {
		_dif_sgl_get_buf(&tmp_sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset);

		memcpy(buf, (uint8_t *)&dif + offset, buf_len);

		_dif_sgl_advance(&tmp_sgl, buf_len);
		offset += buf_len;
	}

end:
	_dif_sgl_advance(sgl, ctx->block_size - ctx->guard_interval - _dif_size(ctx->dif_pi_format));

	return 0;
}

int
spdk_dif_remap_ref_tag(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk,
		       bool check_ref_tag)
{
	struct _dif_sgl sgl;
	uint32_t offset_blocks;
	int rc;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
		return 0;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_remap_ref_tag(&sgl, offset_blocks, ctx, err_blk, check_ref_tag);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}
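
/* Example (illustrative sketch, not part of the library): remap the Reference
 * Tags of an extended-LBA payload before forwarding it to a namespace whose
 * LBA range starts at a different offset.  "iovs", "iovcnt", "num_blocks" and
 * "dif_ctx" are placeholders; the new starting Reference Tag is assumed to
 * have been set beforehand (e.g. with spdk_dif_ctx_set_remapped_init_ref_tag()).
 *
 *	struct spdk_dif_error err_blk;
 *	int rc;
 *
 *	rc = spdk_dif_remap_ref_tag(iovs, iovcnt, num_blocks, &dif_ctx,
 *				    &err_blk, true);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("Reference Tag remap failed.\n");
 *	}
 */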

static int
_dix_remap_ref_tag(struct _dif_sgl *md_sgl, uint32_t offset_blocks,
		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk,
		   bool check_ref_tag)
{
	uint64_t expected = 0, remapped;
	uint8_t *md_buf;
	struct spdk_dif *dif;

	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

	dif = (struct spdk_dif *)(md_buf + ctx->guard_interval);

	if (_dif_ignore(dif, ctx)) {
		goto end;
	}

	/* For type 1 and 2, the Reference Tag is incremented for each
	 * subsequent logical block. For type 3, the Reference Tag
	 * remains the same as the initial Reference Tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		remapped = ctx->remapped_init_ref_tag;
	}

	/* Verify the stored Reference Tag. */
	if (check_ref_tag && !_dif_reftag_check(dif, ctx, expected, offset_blocks, err_blk)) {
		return -1;
	}

	/* Update the stored Reference Tag to the remapped one. */
	_dif_set_reftag(dif, remapped, ctx->dif_pi_format);

end:
	_dif_sgl_advance(md_sgl, ctx->md_size);

	return 0;
}

int
spdk_dix_remap_ref_tag(struct iovec *md_iov, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk,
		       bool check_ref_tag)
{
	struct _dif_sgl md_sgl;
	uint32_t offset_blocks;
	int rc;

	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of metadata iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
		return 0;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dix_remap_ref_tag(&md_sgl, offset_blocks, ctx, err_blk, check_ref_tag);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}
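
/* Example (illustrative sketch, not part of the library): same remap as above,
 * but for the DIX layout where the protection information lives in a separate
 * metadata buffer.  "md_iov", "num_blocks" and "dif_ctx" are placeholders and
 * dif_ctx is assumed to describe the separate-metadata format.
 *
 *	struct spdk_dif_error err_blk;
 *	int rc;
 *
 *	rc = spdk_dix_remap_ref_tag(&md_iov, num_blocks, &dif_ctx, &err_blk, true);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("DIX Reference Tag remap failed.\n");
 *	}
 */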