1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2022 Intel Corporation. 3 * All rights reserved. 4 */ 5 6 #include "spdk/dif.h" 7 #include "spdk/crc16.h" 8 #include "spdk/crc32.h" 9 #include "spdk/crc64.h" 10 #include "spdk/endian.h" 11 #include "spdk/log.h" 12 #include "spdk/util.h" 13 14 #define REFTAG_MASK_16 0x00000000FFFFFFFF 15 #define REFTAG_MASK_32 0xFFFFFFFFFFFFFFFF 16 #define REFTAG_MASK_64 0x0000FFFFFFFFFFFF 17 18 /* The variable size Storage Tag and Reference Tag is not supported yet, 19 * so the maximum size of the Reference Tag is assumed. 20 */ 21 struct spdk_dif { 22 union { 23 struct { 24 uint16_t guard; 25 uint16_t app_tag; 26 uint32_t stor_ref_space; 27 } g16; 28 struct { 29 uint32_t guard; 30 uint16_t app_tag; 31 uint16_t stor_ref_space_p1; 32 uint64_t stor_ref_space_p2; 33 } g32; 34 struct { 35 uint64_t guard; 36 uint16_t app_tag; 37 uint16_t stor_ref_space_p1; 38 uint32_t stor_ref_space_p2; 39 } g64; 40 }; 41 }; 42 SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g16) == 8, "Incorrect size"); 43 SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g32) == 16, "Incorrect size"); 44 SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g64) == 16, "Incorrect size"); 45 46 /* Context to iterate or create a iovec array. 47 * Each sgl is either iterated or created at a time. 48 */ 49 struct _dif_sgl { 50 /* Current iovec in the iteration or creation */ 51 struct iovec *iov; 52 53 /* Remaining count of iovecs in the iteration or creation. */ 54 int iovcnt; 55 56 /* Current offset in the iovec */ 57 uint32_t iov_offset; 58 59 /* Size of the created iovec array in bytes */ 60 uint32_t total_size; 61 }; 62 63 static inline void 64 _dif_sgl_init(struct _dif_sgl *s, struct iovec *iovs, int iovcnt) 65 { 66 s->iov = iovs; 67 s->iovcnt = iovcnt; 68 s->iov_offset = 0; 69 s->total_size = 0; 70 } 71 72 static void 73 _dif_sgl_advance(struct _dif_sgl *s, uint32_t step) 74 { 75 s->iov_offset += step; 76 while (s->iovcnt != 0) { 77 if (s->iov_offset < s->iov->iov_len) { 78 break; 79 } 80 81 s->iov_offset -= s->iov->iov_len; 82 s->iov++; 83 s->iovcnt--; 84 } 85 } 86 87 static inline void 88 _dif_sgl_get_buf(struct _dif_sgl *s, uint8_t **_buf, uint32_t *_buf_len) 89 { 90 if (_buf != NULL) { 91 *_buf = (uint8_t *)s->iov->iov_base + s->iov_offset; 92 } 93 if (_buf_len != NULL) { 94 *_buf_len = s->iov->iov_len - s->iov_offset; 95 } 96 } 97 98 static inline bool 99 _dif_sgl_append(struct _dif_sgl *s, uint8_t *data, uint32_t data_len) 100 { 101 assert(s->iovcnt > 0); 102 s->iov->iov_base = data; 103 s->iov->iov_len = data_len; 104 s->total_size += data_len; 105 s->iov++; 106 s->iovcnt--; 107 108 if (s->iovcnt > 0) { 109 return true; 110 } else { 111 return false; 112 } 113 } 114 115 static inline bool 116 _dif_sgl_append_split(struct _dif_sgl *dst, struct _dif_sgl *src, uint32_t data_len) 117 { 118 uint8_t *buf; 119 uint32_t buf_len; 120 121 while (data_len != 0) { 122 _dif_sgl_get_buf(src, &buf, &buf_len); 123 buf_len = spdk_min(buf_len, data_len); 124 125 if (!_dif_sgl_append(dst, buf, buf_len)) { 126 return false; 127 } 128 129 _dif_sgl_advance(src, buf_len); 130 data_len -= buf_len; 131 } 132 133 return true; 134 } 135 136 /* This function must be used before starting iteration. 
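 * It checks every remaining iovec from the current position and ignores iov_offset, so call it right after _dif_sgl_init() and before any _dif_sgl_advance().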
*/ 137 static bool 138 _dif_sgl_is_bytes_multiple(struct _dif_sgl *s, uint32_t bytes) 139 { 140 int i; 141 142 for (i = 0; i < s->iovcnt; i++) { 143 if (s->iov[i].iov_len % bytes) { 144 return false; 145 } 146 } 147 148 return true; 149 } 150 151 /* This function must be used before starting iteration. */ 152 static bool 153 _dif_sgl_is_valid(struct _dif_sgl *s, uint32_t bytes) 154 { 155 uint64_t total = 0; 156 int i; 157 158 for (i = 0; i < s->iovcnt; i++) { 159 total += s->iov[i].iov_len; 160 } 161 162 return total >= bytes; 163 } 164 165 static void 166 _dif_sgl_copy(struct _dif_sgl *to, struct _dif_sgl *from) 167 { 168 memcpy(to, from, sizeof(struct _dif_sgl)); 169 } 170 171 static bool 172 _dif_is_disabled(enum spdk_dif_type dif_type) 173 { 174 if (dif_type == SPDK_DIF_DISABLE) { 175 return true; 176 } else { 177 return false; 178 } 179 } 180 181 static inline size_t 182 _dif_size(enum spdk_dif_pi_format dif_pi_format) 183 { 184 uint8_t size; 185 186 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 187 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16); 188 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 189 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32); 190 } else { 191 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64); 192 } 193 194 return size; 195 } 196 197 static uint32_t 198 _get_guard_interval(uint32_t block_size, uint32_t md_size, bool dif_loc, bool md_interleave, 199 size_t dif_size) 200 { 201 if (!dif_loc) { 202 /* For metadata formats with more than 8/16 bytes (depending on 203 * the PI format), if the DIF is contained in the last 8/16 bytes 204 * of metadata, then the CRC covers all metadata up to but excluding 205 * these last 8/16 bytes. 206 */ 207 if (md_interleave) { 208 return block_size - dif_size; 209 } else { 210 return md_size - dif_size; 211 } 212 } else { 213 /* For metadata formats with more than 8/16 bytes (depending on 214 * the PI format), if the DIF is contained in the first 8/16 bytes 215 * of metadata, then the CRC does not cover any metadata. 
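 * For example, with an interleaved 4160-byte block (4096 data bytes + 64 metadata bytes) and a 16-byte DIF, this case yields a guard interval of 4096 (the guard covers only the data), while the dif_loc == false case above yields 4144 (the guard also covers the first 48 metadata bytes).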
216 */ 217 if (md_interleave) { 218 return block_size - md_size; 219 } else { 220 return 0; 221 } 222 } 223 } 224 225 static inline uint8_t 226 _dif_guard_size(enum spdk_dif_pi_format dif_pi_format) 227 { 228 uint8_t size; 229 230 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 231 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.guard); 232 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 233 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.guard); 234 } else { 235 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.guard); 236 } 237 238 return size; 239 } 240 241 static inline void 242 _dif_set_guard(struct spdk_dif *dif, uint64_t guard, enum spdk_dif_pi_format dif_pi_format) 243 { 244 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 245 to_be16(&(dif->g16.guard), (uint16_t)guard); 246 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 247 to_be32(&(dif->g32.guard), (uint32_t)guard); 248 } else { 249 to_be64(&(dif->g64.guard), guard); 250 } 251 } 252 253 static inline uint64_t 254 _dif_get_guard(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 255 { 256 uint64_t guard; 257 258 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 259 guard = (uint64_t)from_be16(&(dif->g16.guard)); 260 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 261 guard = (uint64_t)from_be32(&(dif->g32.guard)); 262 } else { 263 guard = from_be64(&(dif->g64.guard)); 264 } 265 266 return guard; 267 } 268 269 static inline uint64_t 270 _dif_generate_guard(uint64_t guard_seed, void *buf, size_t buf_len, 271 enum spdk_dif_pi_format dif_pi_format) 272 { 273 uint64_t guard; 274 275 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 276 guard = (uint64_t)spdk_crc16_t10dif((uint16_t)guard_seed, buf, buf_len); 277 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 278 guard = (uint64_t)spdk_crc32c_nvme(buf, buf_len, guard_seed); 279 } else { 280 guard = spdk_crc64_nvme(buf, buf_len, guard_seed); 281 } 282 283 return guard; 284 } 285 286 static uint64_t 287 dif_generate_guard_split(uint64_t guard_seed, struct _dif_sgl *sgl, uint32_t start, 288 uint32_t len, const struct spdk_dif_ctx *ctx) 289 { 290 uint64_t guard = guard_seed; 291 uint32_t offset, end, buf_len; 292 uint8_t *buf; 293 294 offset = start; 295 end = start + spdk_min(len, ctx->guard_interval - start); 296 297 while (offset < end) { 298 _dif_sgl_get_buf(sgl, &buf, &buf_len); 299 buf_len = spdk_min(buf_len, end - offset); 300 301 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 302 guard = _dif_generate_guard(guard, buf, buf_len, ctx->dif_pi_format); 303 } 304 305 _dif_sgl_advance(sgl, buf_len); 306 offset += buf_len; 307 } 308 309 return guard; 310 } 311 312 static inline uint64_t 313 _dif_generate_guard_copy(uint64_t guard_seed, void *dst, void *src, size_t buf_len, 314 enum spdk_dif_pi_format dif_pi_format) 315 { 316 uint64_t guard; 317 318 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 319 guard = (uint64_t)spdk_crc16_t10dif_copy((uint16_t)guard_seed, dst, src, buf_len); 320 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 321 memcpy(dst, src, buf_len); 322 guard = (uint64_t)spdk_crc32c_nvme(src, buf_len, guard_seed); 323 } else { 324 memcpy(dst, src, buf_len); 325 guard = spdk_crc64_nvme(src, buf_len, guard_seed); 326 } 327 328 return guard; 329 } 330 331 static uint64_t 332 _dif_generate_guard_copy_split(uint64_t guard, struct _dif_sgl *dst_sgl, 333 struct _dif_sgl *src_sgl, uint32_t data_len, 334 enum spdk_dif_pi_format dif_pi_format) 335 { 336 uint32_t offset = 0, src_len, dst_len, buf_len; 337 uint8_t *src, *dst; 338 339 while (offset < data_len) { 340 
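/* Advance in chunks bounded by the smaller of the current source segment, the current destination segment, and the bytes still to copy. */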
_dif_sgl_get_buf(src_sgl, &src, &src_len); 341 _dif_sgl_get_buf(dst_sgl, &dst, &dst_len); 342 buf_len = spdk_min(src_len, dst_len); 343 buf_len = spdk_min(buf_len, data_len - offset); 344 345 guard = _dif_generate_guard_copy(guard, dst, src, buf_len, dif_pi_format); 346 347 _dif_sgl_advance(src_sgl, buf_len); 348 _dif_sgl_advance(dst_sgl, buf_len); 349 offset += buf_len; 350 } 351 352 return guard; 353 } 354 355 static void 356 _data_copy_split(struct _dif_sgl *dst_sgl, struct _dif_sgl *src_sgl, uint32_t data_len) 357 { 358 uint32_t offset = 0, src_len, dst_len, buf_len; 359 uint8_t *src, *dst; 360 361 while (offset < data_len) { 362 _dif_sgl_get_buf(src_sgl, &src, &src_len); 363 _dif_sgl_get_buf(dst_sgl, &dst, &dst_len); 364 buf_len = spdk_min(src_len, dst_len); 365 buf_len = spdk_min(buf_len, data_len - offset); 366 367 memcpy(dst, src, buf_len); 368 369 _dif_sgl_advance(src_sgl, buf_len); 370 _dif_sgl_advance(dst_sgl, buf_len); 371 offset += buf_len; 372 } 373 } 374 375 static inline uint8_t 376 _dif_apptag_offset(enum spdk_dif_pi_format dif_pi_format) 377 { 378 return _dif_guard_size(dif_pi_format); 379 } 380 381 static inline uint8_t 382 _dif_apptag_size(void) 383 { 384 return SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.app_tag); 385 } 386 387 static inline void 388 _dif_set_apptag(struct spdk_dif *dif, uint16_t app_tag, enum spdk_dif_pi_format dif_pi_format) 389 { 390 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 391 to_be16(&(dif->g16.app_tag), app_tag); 392 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 393 to_be16(&(dif->g32.app_tag), app_tag); 394 } else { 395 to_be16(&(dif->g64.app_tag), app_tag); 396 } 397 } 398 399 static inline uint16_t 400 _dif_get_apptag(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 401 { 402 uint16_t app_tag; 403 404 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 405 app_tag = from_be16(&(dif->g16.app_tag)); 406 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 407 app_tag = from_be16(&(dif->g32.app_tag)); 408 } else { 409 app_tag = from_be16(&(dif->g64.app_tag)); 410 } 411 412 return app_tag; 413 } 414 415 static inline bool 416 _dif_apptag_ignore(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 417 { 418 return _dif_get_apptag(dif, dif_pi_format) == SPDK_DIF_APPTAG_IGNORE; 419 } 420 421 static inline uint8_t 422 _dif_reftag_offset(enum spdk_dif_pi_format dif_pi_format) 423 { 424 uint8_t offset; 425 426 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 427 offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size(); 428 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 429 offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size() 430 + SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.stor_ref_space_p1); 431 } else { 432 offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size(); 433 } 434 435 return offset; 436 } 437 438 static inline uint8_t 439 _dif_reftag_size(enum spdk_dif_pi_format dif_pi_format) 440 { 441 uint8_t size; 442 443 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 444 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.stor_ref_space); 445 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 446 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.stor_ref_space_p2); 447 } else { 448 size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.stor_ref_space_p1) + 449 SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.stor_ref_space_p2); 450 } 451 452 return size; 453 } 454 455 static inline void 456 _dif_set_reftag(struct spdk_dif *dif, uint64_t ref_tag, enum spdk_dif_pi_format dif_pi_format) 457 { 458 if (dif_pi_format == 
SPDK_DIF_PI_FORMAT_16) { 459 to_be32(&(dif->g16.stor_ref_space), (uint32_t)ref_tag); 460 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 461 to_be64(&(dif->g32.stor_ref_space_p2), ref_tag); 462 } else { 463 to_be16(&(dif->g64.stor_ref_space_p1), (uint16_t)(ref_tag >> 32)); 464 to_be32(&(dif->g64.stor_ref_space_p2), (uint32_t)ref_tag); 465 } 466 } 467 468 static inline uint64_t 469 _dif_get_reftag(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 470 { 471 uint64_t ref_tag; 472 473 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 474 ref_tag = (uint64_t)from_be32(&(dif->g16.stor_ref_space)); 475 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 476 ref_tag = from_be64(&(dif->g32.stor_ref_space_p2)); 477 } else { 478 ref_tag = (uint64_t)from_be16(&(dif->g64.stor_ref_space_p1)); 479 ref_tag <<= 32; 480 ref_tag |= (uint64_t)from_be32(&(dif->g64.stor_ref_space_p2)); 481 } 482 483 return ref_tag; 484 } 485 486 static inline bool 487 _dif_reftag_match(struct spdk_dif *dif, uint64_t ref_tag, 488 enum spdk_dif_pi_format dif_pi_format) 489 { 490 uint64_t _ref_tag; 491 bool match; 492 493 _ref_tag = _dif_get_reftag(dif, dif_pi_format); 494 495 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 496 match = (_ref_tag == (ref_tag & REFTAG_MASK_16)); 497 } else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 498 match = (_ref_tag == ref_tag); 499 } else { 500 match = (_ref_tag == (ref_tag & REFTAG_MASK_64)); 501 } 502 503 return match; 504 } 505 506 static inline bool 507 _dif_reftag_ignore(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format) 508 { 509 return _dif_reftag_match(dif, REFTAG_MASK_32, dif_pi_format); 510 } 511 512 static bool 513 _dif_ignore(struct spdk_dif *dif, const struct spdk_dif_ctx *ctx) 514 { 515 switch (ctx->dif_type) { 516 case SPDK_DIF_TYPE1: 517 case SPDK_DIF_TYPE2: 518 /* If Type 1 or 2 is used, then all DIF checks are disabled when 519 * the Application Tag is 0xFFFF. 520 */ 521 if (_dif_apptag_ignore(dif, ctx->dif_pi_format)) { 522 return true; 523 } 524 break; 525 case SPDK_DIF_TYPE3: 526 /* If Type 3 is used, then all DIF checks are disabled when the 527 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF 528 * or 0xFFFFFFFFFFFFFFFF depending on the PI format. 
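 * (For the 64-bit guard format the stored Reference Tag is 48 bits wide, so the corresponding all-ones value is 0xFFFFFFFFFFFF.)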
529 */ 530 531 if (_dif_apptag_ignore(dif, ctx->dif_pi_format) && 532 _dif_reftag_ignore(dif, ctx->dif_pi_format)) { 533 return true; 534 } 535 break; 536 default: 537 break; 538 } 539 540 return false; 541 } 542 543 static bool 544 _dif_pi_format_is_valid(enum spdk_dif_pi_format dif_pi_format) 545 { 546 switch (dif_pi_format) { 547 case SPDK_DIF_PI_FORMAT_16: 548 case SPDK_DIF_PI_FORMAT_32: 549 case SPDK_DIF_PI_FORMAT_64: 550 return true; 551 default: 552 return false; 553 } 554 } 555 556 static bool 557 _dif_type_is_valid(enum spdk_dif_type dif_type) 558 { 559 switch (dif_type) { 560 case SPDK_DIF_DISABLE: 561 case SPDK_DIF_TYPE1: 562 case SPDK_DIF_TYPE2: 563 case SPDK_DIF_TYPE3: 564 return true; 565 default: 566 return false; 567 } 568 } 569 570 int 571 spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size, 572 bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags, 573 uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag, 574 uint32_t data_offset, uint64_t guard_seed, struct spdk_dif_ctx_init_ext_opts *opts) 575 { 576 uint32_t data_block_size; 577 enum spdk_dif_pi_format dif_pi_format = SPDK_DIF_PI_FORMAT_16; 578 579 if (opts != NULL) { 580 if (!_dif_pi_format_is_valid(opts->dif_pi_format)) { 581 SPDK_ERRLOG("No valid DIF PI format provided.\n"); 582 return -EINVAL; 583 } 584 585 dif_pi_format = opts->dif_pi_format; 586 } 587 588 if (!_dif_type_is_valid(dif_type)) { 589 SPDK_ERRLOG("No valid DIF type was provided.\n"); 590 return -EINVAL; 591 } 592 593 if (md_size < _dif_size(dif_pi_format)) { 594 SPDK_ERRLOG("Metadata size is smaller than DIF size.\n"); 595 return -EINVAL; 596 } 597 598 if (md_interleave) { 599 if (block_size < md_size) { 600 SPDK_ERRLOG("Block size is smaller than DIF size.\n"); 601 return -EINVAL; 602 } 603 data_block_size = block_size - md_size; 604 } else { 605 data_block_size = block_size; 606 } 607 608 if (data_block_size == 0) { 609 SPDK_ERRLOG("Zero data block size is not allowed\n"); 610 return -EINVAL; 611 } 612 613 if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 614 if ((data_block_size % 512) != 0) { 615 SPDK_ERRLOG("Data block size should be a multiple of 512B\n"); 616 return -EINVAL; 617 } 618 } else { 619 if ((data_block_size % 4096) != 0) { 620 SPDK_ERRLOG("Data block size should be a multiple of 4kB\n"); 621 return -EINVAL; 622 } 623 } 624 625 ctx->block_size = block_size; 626 ctx->md_size = md_size; 627 ctx->md_interleave = md_interleave; 628 ctx->dif_pi_format = dif_pi_format; 629 ctx->guard_interval = _get_guard_interval(block_size, md_size, dif_loc, md_interleave, 630 _dif_size(ctx->dif_pi_format)); 631 ctx->dif_type = dif_type; 632 ctx->dif_flags = dif_flags; 633 ctx->init_ref_tag = init_ref_tag; 634 ctx->apptag_mask = apptag_mask; 635 ctx->app_tag = app_tag; 636 ctx->data_offset = data_offset; 637 ctx->ref_tag_offset = data_offset / data_block_size; 638 ctx->last_guard = guard_seed; 639 ctx->guard_seed = guard_seed; 640 ctx->remapped_init_ref_tag = 0; 641 642 return 0; 643 } 644 645 void 646 spdk_dif_ctx_set_data_offset(struct spdk_dif_ctx *ctx, uint32_t data_offset) 647 { 648 uint32_t data_block_size; 649 650 if (ctx->md_interleave) { 651 data_block_size = ctx->block_size - ctx->md_size; 652 } else { 653 data_block_size = ctx->block_size; 654 } 655 656 ctx->data_offset = data_offset; 657 ctx->ref_tag_offset = data_offset / data_block_size; 658 } 659 660 void 661 spdk_dif_ctx_set_remapped_init_ref_tag(struct spdk_dif_ctx *ctx, 662 uint32_t remapped_init_ref_tag) 663 { 664 
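/* Recorded for the Reference Tag remap path (_dif_remap_ref_tag below), which rewrites each block's stored Reference Tag from the sequence derived from init_ref_tag to the one derived from remapped_init_ref_tag. */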
ctx->remapped_init_ref_tag = remapped_init_ref_tag; 665 } 666 667 static void 668 _dif_generate(void *_dif, uint64_t guard, uint32_t offset_blocks, 669 const struct spdk_dif_ctx *ctx) 670 { 671 struct spdk_dif *dif = _dif; 672 uint64_t ref_tag; 673 674 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 675 _dif_set_guard(dif, guard, ctx->dif_pi_format); 676 } 677 678 if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) { 679 _dif_set_apptag(dif, ctx->app_tag, ctx->dif_pi_format); 680 } 681 682 if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) { 683 /* For type 1 and 2, the reference tag is incremented for each 684 * subsequent logical block. For type 3, the reference tag 685 * remains the same as the initial reference tag. 686 */ 687 if (ctx->dif_type != SPDK_DIF_TYPE3) { 688 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks; 689 } else { 690 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset; 691 } 692 693 /* Overwrite reference tag if initialization reference tag is SPDK_DIF_REFTAG_IGNORE */ 694 if (ctx->init_ref_tag == SPDK_DIF_REFTAG_IGNORE) { 695 if (ctx->dif_pi_format == SPDK_DIF_PI_FORMAT_16) { 696 ref_tag = REFTAG_MASK_16; 697 } else if (ctx->dif_pi_format == SPDK_DIF_PI_FORMAT_32) { 698 ref_tag = REFTAG_MASK_32; 699 } else { 700 ref_tag = REFTAG_MASK_64; 701 } 702 } 703 704 _dif_set_reftag(dif, ref_tag, ctx->dif_pi_format); 705 } 706 } 707 708 static void 709 dif_generate(struct _dif_sgl *sgl, uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 710 { 711 uint32_t offset_blocks; 712 uint8_t *buf; 713 uint64_t guard = 0; 714 715 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 716 _dif_sgl_get_buf(sgl, &buf, NULL); 717 718 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 719 guard = _dif_generate_guard(ctx->guard_seed, buf, ctx->guard_interval, ctx->dif_pi_format); 720 } 721 722 _dif_generate(buf + ctx->guard_interval, guard, offset_blocks, ctx); 723 724 _dif_sgl_advance(sgl, ctx->block_size); 725 } 726 } 727 728 static void 729 dif_store_split(struct _dif_sgl *sgl, struct spdk_dif *dif, 730 const struct spdk_dif_ctx *ctx) 731 { 732 uint32_t offset = 0, rest_md_len, buf_len; 733 uint8_t *buf; 734 735 rest_md_len = ctx->block_size - ctx->guard_interval; 736 737 while (offset < rest_md_len) { 738 _dif_sgl_get_buf(sgl, &buf, &buf_len); 739 740 if (offset < _dif_size(ctx->dif_pi_format)) { 741 buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset); 742 memcpy(buf, (uint8_t *)dif + offset, buf_len); 743 } else { 744 buf_len = spdk_min(buf_len, rest_md_len - offset); 745 } 746 747 _dif_sgl_advance(sgl, buf_len); 748 offset += buf_len; 749 } 750 } 751 752 static uint64_t 753 _dif_generate_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len, 754 uint64_t guard, uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 755 { 756 struct spdk_dif dif = {}; 757 758 assert(offset_in_block < ctx->guard_interval); 759 assert(offset_in_block + data_len < ctx->guard_interval || 760 offset_in_block + data_len == ctx->block_size); 761 762 /* Compute CRC over split logical block data. */ 763 guard = dif_generate_guard_split(guard, sgl, offset_in_block, data_len, ctx); 764 765 if (offset_in_block + data_len < ctx->guard_interval) { 766 return guard; 767 } 768 769 /* If a whole logical block data is parsed, generate DIF 770 * and save it to the temporary DIF area. 
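 * (i.e. offset_in_block + data_len has reached ctx->block_size; see the asserts above).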
771 */ 772 _dif_generate(&dif, guard, offset_blocks, ctx); 773 774 /* Copy generated DIF field to the split DIF field, and then 775 * skip metadata field after DIF field (if any). 776 */ 777 dif_store_split(sgl, &dif, ctx); 778 779 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 780 guard = ctx->guard_seed; 781 } 782 783 return guard; 784 } 785 786 static void 787 dif_generate_split(struct _dif_sgl *sgl, uint32_t num_blocks, 788 const struct spdk_dif_ctx *ctx) 789 { 790 uint32_t offset_blocks; 791 uint64_t guard = 0; 792 793 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 794 guard = ctx->guard_seed; 795 } 796 797 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 798 _dif_generate_split(sgl, 0, ctx->block_size, guard, offset_blocks, ctx); 799 } 800 } 801 802 int 803 spdk_dif_generate(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 804 const struct spdk_dif_ctx *ctx) 805 { 806 struct _dif_sgl sgl; 807 808 _dif_sgl_init(&sgl, iovs, iovcnt); 809 810 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 811 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 812 return -EINVAL; 813 } 814 815 if (_dif_is_disabled(ctx->dif_type)) { 816 return 0; 817 } 818 819 if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) { 820 dif_generate(&sgl, num_blocks, ctx); 821 } else { 822 dif_generate_split(&sgl, num_blocks, ctx); 823 } 824 825 return 0; 826 } 827 828 static void 829 _dif_error_set(struct spdk_dif_error *err_blk, uint8_t err_type, 830 uint64_t expected, uint64_t actual, uint32_t err_offset) 831 { 832 if (err_blk) { 833 err_blk->err_type = err_type; 834 err_blk->expected = expected; 835 err_blk->actual = actual; 836 err_blk->err_offset = err_offset; 837 } 838 } 839 840 static bool 841 _dif_reftag_check(struct spdk_dif *dif, const struct spdk_dif_ctx *ctx, 842 uint64_t expected_reftag, uint32_t offset_blocks, struct spdk_dif_error *err_blk) 843 { 844 uint64_t reftag; 845 846 if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) { 847 switch (ctx->dif_type) { 848 case SPDK_DIF_TYPE1: 849 case SPDK_DIF_TYPE2: 850 /* Compare the DIF Reference Tag field to the passed Reference Tag. 851 * The passed Reference Tag will be the least significant 4 bytes 852 * or 8 bytes (depending on the PI format) 853 * of the LBA when Type 1 is used, and application specific value 854 * if Type 2 is used. 855 */ 856 if (!_dif_reftag_match(dif, expected_reftag, ctx->dif_pi_format)) { 857 reftag = _dif_get_reftag(dif, ctx->dif_pi_format); 858 _dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected_reftag, 859 reftag, offset_blocks); 860 SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu64 "," \ 861 " Expected=%lx, Actual=%lx\n", 862 expected_reftag, expected_reftag, reftag); 863 return false; 864 } 865 break; 866 case SPDK_DIF_TYPE3: 867 /* For Type 3, computed Reference Tag remains unchanged. 868 * Hence ignore the Reference Tag field. 869 */ 870 break; 871 default: 872 break; 873 } 874 } 875 876 return true; 877 } 878 879 static int 880 _dif_verify(void *_dif, uint64_t guard, uint32_t offset_blocks, 881 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 882 { 883 struct spdk_dif *dif = _dif; 884 uint64_t _guard; 885 uint16_t _app_tag; 886 uint64_t ref_tag; 887 888 if (_dif_ignore(dif, ctx)) { 889 return 0; 890 } 891 892 /* For type 1 and 2, the reference tag is incremented for each 893 * subsequent logical block. For type 3, the reference tag 894 * remains the same as the initial reference tag. 
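 * For example, with init_ref_tag = 100 and ref_tag_offset = 2, Type 1/2 expects 102, 103, 104, ... on successive blocks, while Type 3 expects 102 on every block.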
895 */ 896 if (ctx->dif_type != SPDK_DIF_TYPE3) { 897 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks; 898 } else { 899 ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset; 900 } 901 902 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 903 /* Compare the DIF Guard field to the CRC computed over the logical 904 * block data. 905 */ 906 _guard = _dif_get_guard(dif, ctx->dif_pi_format); 907 if (_guard != guard) { 908 _dif_error_set(err_blk, SPDK_DIF_GUARD_ERROR, _guard, guard, 909 offset_blocks); 910 SPDK_ERRLOG("Failed to compare Guard: LBA=%" PRIu64 "," \ 911 " Expected=%lx, Actual=%lx\n", 912 ref_tag, _guard, guard); 913 return -1; 914 } 915 } 916 917 if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) { 918 /* Compare unmasked bits in the DIF Application Tag field to the 919 * passed Application Tag. 920 */ 921 _app_tag = _dif_get_apptag(dif, ctx->dif_pi_format); 922 if ((_app_tag & ctx->apptag_mask) != (ctx->app_tag & ctx->apptag_mask)) { 923 _dif_error_set(err_blk, SPDK_DIF_APPTAG_ERROR, ctx->app_tag, 924 (_app_tag & ctx->apptag_mask), offset_blocks); 925 SPDK_ERRLOG("Failed to compare App Tag: LBA=%" PRIu64 "," \ 926 " Expected=%x, Actual=%x\n", 927 ref_tag, ctx->app_tag, (_app_tag & ctx->apptag_mask)); 928 return -1; 929 } 930 } 931 932 if (!_dif_reftag_check(dif, ctx, ref_tag, offset_blocks, err_blk)) { 933 return -1; 934 } 935 936 return 0; 937 } 938 939 static int 940 dif_verify(struct _dif_sgl *sgl, uint32_t num_blocks, 941 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 942 { 943 uint32_t offset_blocks; 944 int rc; 945 uint8_t *buf; 946 uint64_t guard = 0; 947 948 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 949 _dif_sgl_get_buf(sgl, &buf, NULL); 950 951 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 952 guard = _dif_generate_guard(ctx->guard_seed, buf, ctx->guard_interval, ctx->dif_pi_format); 953 } 954 955 rc = _dif_verify(buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 956 if (rc != 0) { 957 return rc; 958 } 959 960 _dif_sgl_advance(sgl, ctx->block_size); 961 } 962 963 return 0; 964 } 965 966 static void 967 dif_load_split(struct _dif_sgl *sgl, struct spdk_dif *dif, 968 const struct spdk_dif_ctx *ctx) 969 { 970 uint32_t offset = 0, rest_md_len, buf_len; 971 uint8_t *buf; 972 973 rest_md_len = ctx->block_size - ctx->guard_interval; 974 975 while (offset < rest_md_len) { 976 _dif_sgl_get_buf(sgl, &buf, &buf_len); 977 978 if (offset < _dif_size(ctx->dif_pi_format)) { 979 buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset); 980 memcpy((uint8_t *)dif + offset, buf, buf_len); 981 } else { 982 buf_len = spdk_min(buf_len, rest_md_len - offset); 983 } 984 985 _dif_sgl_advance(sgl, buf_len); 986 offset += buf_len; 987 } 988 } 989 990 static int 991 _dif_verify_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len, 992 uint64_t *_guard, uint32_t offset_blocks, 993 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 994 { 995 uint64_t guard = *_guard; 996 struct spdk_dif dif = {}; 997 int rc; 998 999 assert(_guard != NULL); 1000 assert(offset_in_block < ctx->guard_interval); 1001 assert(offset_in_block + data_len < ctx->guard_interval || 1002 offset_in_block + data_len == ctx->block_size); 1003 1004 guard = dif_generate_guard_split(guard, sgl, offset_in_block, data_len, ctx); 1005 1006 if (offset_in_block + data_len < ctx->guard_interval) { 1007 *_guard = guard; 1008 return 0; 1009 } 1010 1011 dif_load_split(sgl, &dif, ctx); 1012 1013 rc = _dif_verify(&dif, guard, 
offset_blocks, ctx, err_blk); 1014 if (rc != 0) { 1015 return rc; 1016 } 1017 1018 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1019 guard = ctx->guard_seed; 1020 } 1021 1022 *_guard = guard; 1023 return 0; 1024 } 1025 1026 static int 1027 dif_verify_split(struct _dif_sgl *sgl, uint32_t num_blocks, 1028 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 1029 { 1030 uint32_t offset_blocks; 1031 uint64_t guard = 0; 1032 int rc; 1033 1034 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1035 guard = ctx->guard_seed; 1036 } 1037 1038 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1039 rc = _dif_verify_split(sgl, 0, ctx->block_size, &guard, offset_blocks, 1040 ctx, err_blk); 1041 if (rc != 0) { 1042 return rc; 1043 } 1044 } 1045 1046 return 0; 1047 } 1048 1049 int 1050 spdk_dif_verify(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 1051 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk) 1052 { 1053 struct _dif_sgl sgl; 1054 1055 _dif_sgl_init(&sgl, iovs, iovcnt); 1056 1057 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 1058 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1059 return -EINVAL; 1060 } 1061 1062 if (_dif_is_disabled(ctx->dif_type)) { 1063 return 0; 1064 } 1065 1066 if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) { 1067 return dif_verify(&sgl, num_blocks, ctx, err_blk); 1068 } else { 1069 return dif_verify_split(&sgl, num_blocks, ctx, err_blk); 1070 } 1071 } 1072 1073 static uint32_t 1074 dif_update_crc32c(struct _dif_sgl *sgl, uint32_t num_blocks, 1075 uint32_t crc32c, const struct spdk_dif_ctx *ctx) 1076 { 1077 uint32_t offset_blocks; 1078 uint8_t *buf; 1079 1080 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1081 _dif_sgl_get_buf(sgl, &buf, NULL); 1082 1083 crc32c = spdk_crc32c_update(buf, ctx->block_size - ctx->md_size, crc32c); 1084 1085 _dif_sgl_advance(sgl, ctx->block_size); 1086 } 1087 1088 return crc32c; 1089 } 1090 1091 static uint32_t 1092 _dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len, 1093 uint32_t crc32c, const struct spdk_dif_ctx *ctx) 1094 { 1095 uint32_t data_block_size, buf_len; 1096 uint8_t *buf; 1097 1098 data_block_size = ctx->block_size - ctx->md_size; 1099 1100 assert(offset_in_block + data_len <= ctx->block_size); 1101 1102 while (data_len != 0) { 1103 _dif_sgl_get_buf(sgl, &buf, &buf_len); 1104 buf_len = spdk_min(buf_len, data_len); 1105 1106 if (offset_in_block < data_block_size) { 1107 buf_len = spdk_min(buf_len, data_block_size - offset_in_block); 1108 crc32c = spdk_crc32c_update(buf, buf_len, crc32c); 1109 } 1110 1111 _dif_sgl_advance(sgl, buf_len); 1112 offset_in_block += buf_len; 1113 data_len -= buf_len; 1114 } 1115 1116 return crc32c; 1117 } 1118 1119 static uint32_t 1120 dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t num_blocks, 1121 uint32_t crc32c, const struct spdk_dif_ctx *ctx) 1122 { 1123 uint32_t offset_blocks; 1124 1125 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1126 crc32c = _dif_update_crc32c_split(sgl, 0, ctx->block_size, crc32c, ctx); 1127 } 1128 1129 return crc32c; 1130 } 1131 1132 int 1133 spdk_dif_update_crc32c(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 1134 uint32_t *_crc32c, const struct spdk_dif_ctx *ctx) 1135 { 1136 struct _dif_sgl sgl; 1137 1138 if (_crc32c == NULL) { 1139 return -EINVAL; 1140 } 1141 1142 _dif_sgl_init(&sgl, iovs, iovcnt); 1143 1144 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 1145 
SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1146 return -EINVAL; 1147 } 1148 1149 if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) { 1150 *_crc32c = dif_update_crc32c(&sgl, num_blocks, *_crc32c, ctx); 1151 } else { 1152 *_crc32c = dif_update_crc32c_split(&sgl, num_blocks, *_crc32c, ctx); 1153 } 1154 1155 return 0; 1156 } 1157 1158 static void 1159 _dif_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1160 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 1161 { 1162 uint32_t data_block_size; 1163 uint8_t *src, *dst; 1164 uint64_t guard = 0; 1165 1166 data_block_size = ctx->block_size - ctx->md_size; 1167 1168 _dif_sgl_get_buf(src_sgl, &src, NULL); 1169 _dif_sgl_get_buf(dst_sgl, &dst, NULL); 1170 1171 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1172 guard = _dif_generate_guard_copy(ctx->guard_seed, dst, src, data_block_size, 1173 ctx->dif_pi_format); 1174 guard = _dif_generate_guard(guard, dst + data_block_size, 1175 ctx->guard_interval - data_block_size, ctx->dif_pi_format); 1176 } else { 1177 memcpy(dst, src, data_block_size); 1178 } 1179 1180 _dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx); 1181 1182 _dif_sgl_advance(src_sgl, data_block_size); 1183 _dif_sgl_advance(dst_sgl, ctx->block_size); 1184 } 1185 1186 static void 1187 dif_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1188 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1189 { 1190 uint32_t offset_blocks; 1191 1192 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1193 _dif_insert_copy(src_sgl, dst_sgl, offset_blocks, ctx); 1194 } 1195 } 1196 1197 static void 1198 _dif_insert_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1199 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 1200 { 1201 uint32_t data_block_size; 1202 uint64_t guard = 0; 1203 struct spdk_dif dif = {}; 1204 1205 data_block_size = ctx->block_size - ctx->md_size; 1206 1207 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1208 guard = _dif_generate_guard_copy_split(ctx->guard_seed, dst_sgl, src_sgl, 1209 data_block_size, ctx->dif_pi_format); 1210 guard = dif_generate_guard_split(guard, dst_sgl, data_block_size, 1211 ctx->guard_interval - data_block_size, ctx); 1212 } else { 1213 _data_copy_split(dst_sgl, src_sgl, data_block_size); 1214 _dif_sgl_advance(dst_sgl, ctx->guard_interval - data_block_size); 1215 } 1216 1217 _dif_generate(&dif, guard, offset_blocks, ctx); 1218 1219 dif_store_split(dst_sgl, &dif, ctx); 1220 } 1221 1222 static void 1223 dif_insert_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1224 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1225 { 1226 uint32_t offset_blocks; 1227 1228 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1229 _dif_insert_copy_split(src_sgl, dst_sgl, offset_blocks, ctx); 1230 } 1231 } 1232 1233 static void 1234 _dif_disable_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1235 const struct spdk_dif_ctx *ctx) 1236 { 1237 uint32_t offset = 0, src_len, dst_len, buf_len, data_block_size; 1238 uint8_t *src, *dst; 1239 1240 data_block_size = ctx->block_size - ctx->md_size; 1241 1242 while (offset < data_block_size) { 1243 _dif_sgl_get_buf(src_sgl, &src, &src_len); 1244 _dif_sgl_get_buf(dst_sgl, &dst, &dst_len); 1245 buf_len = spdk_min(src_len, dst_len); 1246 buf_len = spdk_min(buf_len, data_block_size - offset); 1247 1248 memcpy(dst, src, buf_len); 1249 1250 _dif_sgl_advance(src_sgl, buf_len); 1251 _dif_sgl_advance(dst_sgl, buf_len); 1252 offset += 
buf_len; 1253 } 1254 1255 _dif_sgl_advance(dst_sgl, ctx->md_size); 1256 } 1257 1258 static void 1259 dif_disable_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1260 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1261 { 1262 uint32_t offset_blocks; 1263 1264 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1265 _dif_disable_insert_copy(src_sgl, dst_sgl, ctx); 1266 } 1267 } 1268 1269 static int 1270 _spdk_dif_insert_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1271 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1272 { 1273 uint32_t data_block_size; 1274 1275 data_block_size = ctx->block_size - ctx->md_size; 1276 1277 if (!_dif_sgl_is_valid(src_sgl, data_block_size * num_blocks) || 1278 !_dif_sgl_is_valid(dst_sgl, ctx->block_size * num_blocks)) { 1279 SPDK_ERRLOG("Size of iovec arrays are not valid.\n"); 1280 return -EINVAL; 1281 } 1282 1283 if (_dif_is_disabled(ctx->dif_type)) { 1284 dif_disable_insert_copy(src_sgl, dst_sgl, num_blocks, ctx); 1285 return 0; 1286 } 1287 1288 if (_dif_sgl_is_bytes_multiple(src_sgl, data_block_size) && 1289 _dif_sgl_is_bytes_multiple(dst_sgl, ctx->block_size)) { 1290 dif_insert_copy(src_sgl, dst_sgl, num_blocks, ctx); 1291 } else { 1292 dif_insert_copy_split(src_sgl, dst_sgl, num_blocks, ctx); 1293 } 1294 1295 return 0; 1296 } 1297 1298 int 1299 spdk_dif_generate_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs, 1300 int bounce_iovcnt, uint32_t num_blocks, 1301 const struct spdk_dif_ctx *ctx) 1302 { 1303 struct _dif_sgl src_sgl, dst_sgl; 1304 1305 _dif_sgl_init(&src_sgl, iovs, iovcnt); 1306 _dif_sgl_init(&dst_sgl, bounce_iovs, bounce_iovcnt); 1307 1308 if (!(ctx->dif_flags & SPDK_DIF_FLAGS_NVME_PRACT) || 1309 ctx->md_size == _dif_size(ctx->dif_pi_format)) { 1310 return _spdk_dif_insert_copy(&src_sgl, &dst_sgl, num_blocks, ctx); 1311 } else { 1312 return -ENOTSUP; 1313 } 1314 } 1315 1316 static int 1317 _dif_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1318 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx, 1319 struct spdk_dif_error *err_blk) 1320 { 1321 uint32_t data_block_size; 1322 uint8_t *src, *dst; 1323 int rc; 1324 uint64_t guard = 0; 1325 1326 data_block_size = ctx->block_size - ctx->md_size; 1327 1328 _dif_sgl_get_buf(src_sgl, &src, NULL); 1329 _dif_sgl_get_buf(dst_sgl, &dst, NULL); 1330 1331 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1332 guard = _dif_generate_guard_copy(ctx->guard_seed, dst, src, data_block_size, 1333 ctx->dif_pi_format); 1334 guard = _dif_generate_guard(guard, src + data_block_size, 1335 ctx->guard_interval - data_block_size, ctx->dif_pi_format); 1336 } else { 1337 memcpy(dst, src, data_block_size); 1338 } 1339 1340 rc = _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1341 if (rc != 0) { 1342 return rc; 1343 } 1344 1345 _dif_sgl_advance(src_sgl, ctx->block_size); 1346 _dif_sgl_advance(dst_sgl, data_block_size); 1347 1348 return 0; 1349 } 1350 1351 static int 1352 dif_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1353 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1354 struct spdk_dif_error *err_blk) 1355 { 1356 uint32_t offset_blocks; 1357 int rc; 1358 1359 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1360 rc = _dif_strip_copy(src_sgl, dst_sgl, offset_blocks, ctx, err_blk); 1361 if (rc != 0) { 1362 return rc; 1363 } 1364 } 1365 1366 return 0; 1367 } 1368 1369 static int 1370 _dif_strip_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 
1371 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx, 1372 struct spdk_dif_error *err_blk) 1373 { 1374 uint32_t data_block_size; 1375 uint64_t guard = 0; 1376 struct spdk_dif dif = {}; 1377 1378 data_block_size = ctx->block_size - ctx->md_size; 1379 1380 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1381 guard = _dif_generate_guard_copy_split(ctx->guard_seed, dst_sgl, src_sgl, 1382 data_block_size, ctx->dif_pi_format); 1383 guard = dif_generate_guard_split(guard, src_sgl, data_block_size, 1384 ctx->guard_interval - data_block_size, ctx); 1385 } else { 1386 _data_copy_split(dst_sgl, src_sgl, data_block_size); 1387 _dif_sgl_advance(src_sgl, ctx->guard_interval - data_block_size); 1388 } 1389 1390 dif_load_split(src_sgl, &dif, ctx); 1391 1392 return _dif_verify(&dif, guard, offset_blocks, ctx, err_blk); 1393 } 1394 1395 static int 1396 dif_strip_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1397 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1398 struct spdk_dif_error *err_blk) 1399 { 1400 uint32_t offset_blocks; 1401 int rc; 1402 1403 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1404 rc = _dif_strip_copy_split(src_sgl, dst_sgl, offset_blocks, ctx, err_blk); 1405 if (rc != 0) { 1406 return rc; 1407 } 1408 } 1409 1410 return 0; 1411 } 1412 1413 static void 1414 _dif_disable_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1415 const struct spdk_dif_ctx *ctx) 1416 { 1417 uint32_t offset = 0, src_len, dst_len, buf_len, data_block_size; 1418 uint8_t *src, *dst; 1419 1420 data_block_size = ctx->block_size - ctx->md_size; 1421 1422 while (offset < data_block_size) { 1423 _dif_sgl_get_buf(src_sgl, &src, &src_len); 1424 _dif_sgl_get_buf(dst_sgl, &dst, &dst_len); 1425 buf_len = spdk_min(src_len, dst_len); 1426 buf_len = spdk_min(buf_len, data_block_size - offset); 1427 1428 memcpy(dst, src, buf_len); 1429 1430 _dif_sgl_advance(src_sgl, buf_len); 1431 _dif_sgl_advance(dst_sgl, buf_len); 1432 offset += buf_len; 1433 } 1434 1435 _dif_sgl_advance(src_sgl, ctx->md_size); 1436 } 1437 1438 static void 1439 dif_disable_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1440 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1441 { 1442 uint32_t offset_blocks; 1443 1444 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1445 _dif_disable_strip_copy(src_sgl, dst_sgl, ctx); 1446 } 1447 } 1448 1449 static int 1450 _spdk_dif_strip_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl, 1451 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1452 struct spdk_dif_error *err_blk) 1453 { 1454 uint32_t data_block_size; 1455 1456 data_block_size = ctx->block_size - ctx->md_size; 1457 1458 if (!_dif_sgl_is_valid(dst_sgl, data_block_size * num_blocks) || 1459 !_dif_sgl_is_valid(src_sgl, ctx->block_size * num_blocks)) { 1460 SPDK_ERRLOG("Size of iovec arrays are not valid\n"); 1461 return -EINVAL; 1462 } 1463 1464 if (_dif_is_disabled(ctx->dif_type)) { 1465 dif_disable_strip_copy(src_sgl, dst_sgl, num_blocks, ctx); 1466 return 0; 1467 } 1468 1469 if (_dif_sgl_is_bytes_multiple(dst_sgl, data_block_size) && 1470 _dif_sgl_is_bytes_multiple(src_sgl, ctx->block_size)) { 1471 return dif_strip_copy(src_sgl, dst_sgl, num_blocks, ctx, err_blk); 1472 } else { 1473 return dif_strip_copy_split(src_sgl, dst_sgl, num_blocks, ctx, err_blk); 1474 } 1475 } 1476 1477 int 1478 spdk_dif_verify_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs, 1479 int bounce_iovcnt, uint32_t num_blocks, 1480 const struct spdk_dif_ctx 
*ctx, 1481 struct spdk_dif_error *err_blk) 1482 { 1483 struct _dif_sgl src_sgl, dst_sgl; 1484 1485 _dif_sgl_init(&src_sgl, bounce_iovs, bounce_iovcnt); 1486 _dif_sgl_init(&dst_sgl, iovs, iovcnt); 1487 1488 if (!(ctx->dif_flags & SPDK_DIF_FLAGS_NVME_PRACT) || 1489 ctx->md_size == _dif_size(ctx->dif_pi_format)) { 1490 return _spdk_dif_strip_copy(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk); 1491 } else { 1492 return -ENOTSUP; 1493 } 1494 } 1495 1496 static void 1497 _bit_flip(uint8_t *buf, uint32_t flip_bit) 1498 { 1499 uint8_t byte; 1500 1501 byte = *buf; 1502 byte ^= 1 << flip_bit; 1503 *buf = byte; 1504 } 1505 1506 static int 1507 _dif_inject_error(struct _dif_sgl *sgl, 1508 uint32_t block_size, uint32_t num_blocks, 1509 uint32_t inject_offset_blocks, 1510 uint32_t inject_offset_bytes, 1511 uint32_t inject_offset_bits) 1512 { 1513 uint32_t offset_in_block, buf_len; 1514 uint8_t *buf; 1515 1516 _dif_sgl_advance(sgl, block_size * inject_offset_blocks); 1517 1518 offset_in_block = 0; 1519 1520 while (offset_in_block < block_size) { 1521 _dif_sgl_get_buf(sgl, &buf, &buf_len); 1522 buf_len = spdk_min(buf_len, block_size - offset_in_block); 1523 1524 if (inject_offset_bytes >= offset_in_block && 1525 inject_offset_bytes < offset_in_block + buf_len) { 1526 buf += inject_offset_bytes - offset_in_block; 1527 _bit_flip(buf, inject_offset_bits); 1528 return 0; 1529 } 1530 1531 _dif_sgl_advance(sgl, buf_len); 1532 offset_in_block += buf_len; 1533 } 1534 1535 return -1; 1536 } 1537 1538 static int 1539 dif_inject_error(struct _dif_sgl *sgl, uint32_t block_size, uint32_t num_blocks, 1540 uint32_t start_inject_bytes, uint32_t inject_range_bytes, 1541 uint32_t *inject_offset) 1542 { 1543 uint32_t inject_offset_blocks, inject_offset_bytes, inject_offset_bits; 1544 uint32_t offset_blocks; 1545 int rc; 1546 1547 srand(time(0)); 1548 1549 inject_offset_blocks = rand() % num_blocks; 1550 inject_offset_bytes = start_inject_bytes + (rand() % inject_range_bytes); 1551 inject_offset_bits = rand() % 8; 1552 1553 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1554 if (offset_blocks == inject_offset_blocks) { 1555 rc = _dif_inject_error(sgl, block_size, num_blocks, 1556 inject_offset_blocks, 1557 inject_offset_bytes, 1558 inject_offset_bits); 1559 if (rc == 0) { 1560 *inject_offset = inject_offset_blocks; 1561 } 1562 return rc; 1563 } 1564 } 1565 1566 return -1; 1567 } 1568 1569 int 1570 spdk_dif_inject_error(struct iovec *iovs, int iovcnt, uint32_t num_blocks, 1571 const struct spdk_dif_ctx *ctx, uint32_t inject_flags, 1572 uint32_t *inject_offset) 1573 { 1574 struct _dif_sgl sgl; 1575 int rc; 1576 1577 _dif_sgl_init(&sgl, iovs, iovcnt); 1578 1579 if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) { 1580 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1581 return -EINVAL; 1582 } 1583 1584 if (inject_flags & SPDK_DIF_REFTAG_ERROR) { 1585 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1586 ctx->guard_interval + _dif_reftag_offset(ctx->dif_pi_format), 1587 _dif_reftag_size(ctx->dif_pi_format), 1588 inject_offset); 1589 if (rc != 0) { 1590 SPDK_ERRLOG("Failed to inject error to Reference Tag.\n"); 1591 return rc; 1592 } 1593 } 1594 1595 if (inject_flags & SPDK_DIF_APPTAG_ERROR) { 1596 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1597 ctx->guard_interval + _dif_apptag_offset(ctx->dif_pi_format), 1598 _dif_apptag_size(), 1599 inject_offset); 1600 if (rc != 0) { 1601 SPDK_ERRLOG("Failed to inject error to Application Tag.\n"); 1602 return rc; 1603 } 1604 } 1605 
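	/* As with the Reference Tag and Application Tag cases above, the Guard and
	 * data-block cases below pick one random block and flip a single random bit
	 * in the relevant field. Illustrative test usage (a sketch, not part of this
	 * file; dif_ctx is an already-initialized struct spdk_dif_ctx with
	 * SPDK_DIF_FLAGS_GUARD_CHECK set):
	 *
	 *   uint32_t bad_block;
	 *   struct spdk_dif_error err_blk;
	 *
	 *   rc = spdk_dif_inject_error(iovs, iovcnt, num_blocks, &dif_ctx,
	 *                              SPDK_DIF_GUARD_ERROR, &bad_block);
	 *   // A subsequent spdk_dif_verify(iovs, iovcnt, num_blocks, &dif_ctx, &err_blk)
	 *   // is then expected to fail with err_blk.err_type == SPDK_DIF_GUARD_ERROR
	 *   // and err_blk.err_offset == bad_block.
	 */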
if (inject_flags & SPDK_DIF_GUARD_ERROR) { 1606 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1607 ctx->guard_interval, 1608 _dif_guard_size(ctx->dif_pi_format), 1609 inject_offset); 1610 if (rc != 0) { 1611 SPDK_ERRLOG("Failed to inject error to Guard.\n"); 1612 return rc; 1613 } 1614 } 1615 1616 if (inject_flags & SPDK_DIF_DATA_ERROR) { 1617 /* If the DIF information is contained within the last 8/16 bytes of 1618 * metadata (depending on the PI format), then the CRC covers all metadata 1619 * bytes up to but excluding the last 8/16 bytes. But error injection does not 1620 * cover these metadata because classification is not determined yet. 1621 * 1622 * Note: Error injection to data block is expected to be detected as 1623 * guard error. 1624 */ 1625 rc = dif_inject_error(&sgl, ctx->block_size, num_blocks, 1626 0, 1627 ctx->block_size - ctx->md_size, 1628 inject_offset); 1629 if (rc != 0) { 1630 SPDK_ERRLOG("Failed to inject error to data block.\n"); 1631 return rc; 1632 } 1633 } 1634 1635 return 0; 1636 } 1637 1638 static void 1639 dix_generate(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1640 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1641 { 1642 uint32_t offset_blocks = 0; 1643 uint8_t *data_buf, *md_buf; 1644 uint64_t guard; 1645 1646 while (offset_blocks < num_blocks) { 1647 _dif_sgl_get_buf(data_sgl, &data_buf, NULL); 1648 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1649 1650 guard = 0; 1651 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1652 guard = _dif_generate_guard(ctx->guard_seed, data_buf, ctx->block_size, 1653 ctx->dif_pi_format); 1654 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1655 ctx->dif_pi_format); 1656 } 1657 1658 _dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx); 1659 1660 _dif_sgl_advance(data_sgl, ctx->block_size); 1661 _dif_sgl_advance(md_sgl, ctx->md_size); 1662 offset_blocks++; 1663 } 1664 } 1665 1666 static void 1667 _dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1668 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx) 1669 { 1670 uint32_t offset_in_block, data_buf_len; 1671 uint8_t *data_buf, *md_buf; 1672 uint64_t guard = 0; 1673 1674 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1675 1676 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1677 guard = ctx->guard_seed; 1678 } 1679 offset_in_block = 0; 1680 1681 while (offset_in_block < ctx->block_size) { 1682 _dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len); 1683 data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block); 1684 1685 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1686 guard = _dif_generate_guard(guard, data_buf, data_buf_len, 1687 ctx->dif_pi_format); 1688 } 1689 1690 _dif_sgl_advance(data_sgl, data_buf_len); 1691 offset_in_block += data_buf_len; 1692 } 1693 1694 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1695 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1696 ctx->dif_pi_format); 1697 } 1698 1699 _dif_sgl_advance(md_sgl, ctx->md_size); 1700 1701 _dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx); 1702 } 1703 1704 static void 1705 dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1706 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1707 { 1708 uint32_t offset_blocks; 1709 1710 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1711 _dix_generate_split(data_sgl, md_sgl, offset_blocks, ctx); 1712 } 1713 } 1714 1715 int 1716 spdk_dix_generate(struct iovec *iovs, int iovcnt, struct iovec 
*md_iov, 1717 uint32_t num_blocks, const struct spdk_dif_ctx *ctx) 1718 { 1719 struct _dif_sgl data_sgl, md_sgl; 1720 1721 _dif_sgl_init(&data_sgl, iovs, iovcnt); 1722 _dif_sgl_init(&md_sgl, md_iov, 1); 1723 1724 if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) || 1725 !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) { 1726 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1727 return -EINVAL; 1728 } 1729 1730 if (_dif_is_disabled(ctx->dif_type)) { 1731 return 0; 1732 } 1733 1734 if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) { 1735 dix_generate(&data_sgl, &md_sgl, num_blocks, ctx); 1736 } else { 1737 dix_generate_split(&data_sgl, &md_sgl, num_blocks, ctx); 1738 } 1739 1740 return 0; 1741 } 1742 1743 static int 1744 dix_verify(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1745 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1746 struct spdk_dif_error *err_blk) 1747 { 1748 uint32_t offset_blocks = 0; 1749 uint8_t *data_buf, *md_buf; 1750 uint64_t guard; 1751 int rc; 1752 1753 while (offset_blocks < num_blocks) { 1754 _dif_sgl_get_buf(data_sgl, &data_buf, NULL); 1755 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1756 1757 guard = 0; 1758 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1759 guard = _dif_generate_guard(ctx->guard_seed, data_buf, ctx->block_size, 1760 ctx->dif_pi_format); 1761 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1762 ctx->dif_pi_format); 1763 } 1764 1765 rc = _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1766 if (rc != 0) { 1767 return rc; 1768 } 1769 1770 _dif_sgl_advance(data_sgl, ctx->block_size); 1771 _dif_sgl_advance(md_sgl, ctx->md_size); 1772 offset_blocks++; 1773 } 1774 1775 return 0; 1776 } 1777 1778 static int 1779 _dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1780 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx, 1781 struct spdk_dif_error *err_blk) 1782 { 1783 uint32_t offset_in_block, data_buf_len; 1784 uint8_t *data_buf, *md_buf; 1785 uint64_t guard = 0; 1786 1787 _dif_sgl_get_buf(md_sgl, &md_buf, NULL); 1788 1789 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1790 guard = ctx->guard_seed; 1791 } 1792 offset_in_block = 0; 1793 1794 while (offset_in_block < ctx->block_size) { 1795 _dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len); 1796 data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block); 1797 1798 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1799 guard = _dif_generate_guard(guard, data_buf, data_buf_len, 1800 ctx->dif_pi_format); 1801 } 1802 1803 _dif_sgl_advance(data_sgl, data_buf_len); 1804 offset_in_block += data_buf_len; 1805 } 1806 1807 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 1808 guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval, 1809 ctx->dif_pi_format); 1810 } 1811 1812 _dif_sgl_advance(md_sgl, ctx->md_size); 1813 1814 return _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk); 1815 } 1816 1817 static int 1818 dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl, 1819 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1820 struct spdk_dif_error *err_blk) 1821 { 1822 uint32_t offset_blocks; 1823 int rc; 1824 1825 for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) { 1826 rc = _dix_verify_split(data_sgl, md_sgl, offset_blocks, ctx, err_blk); 1827 if (rc != 0) { 1828 return rc; 1829 } 1830 } 1831 1832 return 0; 1833 } 1834 1835 int 1836 spdk_dix_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov, 1837 
uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1838 struct spdk_dif_error *err_blk) 1839 { 1840 struct _dif_sgl data_sgl, md_sgl; 1841 1842 if (md_iov->iov_base == NULL) { 1843 SPDK_ERRLOG("Metadata buffer is NULL.\n"); 1844 return -EINVAL; 1845 } 1846 1847 _dif_sgl_init(&data_sgl, iovs, iovcnt); 1848 _dif_sgl_init(&md_sgl, md_iov, 1); 1849 1850 if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) || 1851 !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) { 1852 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1853 return -EINVAL; 1854 } 1855 1856 if (_dif_is_disabled(ctx->dif_type)) { 1857 return 0; 1858 } 1859 1860 if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) { 1861 return dix_verify(&data_sgl, &md_sgl, num_blocks, ctx, err_blk); 1862 } else { 1863 return dix_verify_split(&data_sgl, &md_sgl, num_blocks, ctx, err_blk); 1864 } 1865 } 1866 1867 int 1868 spdk_dix_inject_error(struct iovec *iovs, int iovcnt, struct iovec *md_iov, 1869 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 1870 uint32_t inject_flags, uint32_t *inject_offset) 1871 { 1872 struct _dif_sgl data_sgl, md_sgl; 1873 int rc; 1874 1875 _dif_sgl_init(&data_sgl, iovs, iovcnt); 1876 _dif_sgl_init(&md_sgl, md_iov, 1); 1877 1878 if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) || 1879 !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) { 1880 SPDK_ERRLOG("Size of iovec array is not valid.\n"); 1881 return -EINVAL; 1882 } 1883 1884 if (inject_flags & SPDK_DIF_REFTAG_ERROR) { 1885 rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks, 1886 ctx->guard_interval + _dif_reftag_offset(ctx->dif_pi_format), 1887 _dif_reftag_size(ctx->dif_pi_format), 1888 inject_offset); 1889 if (rc != 0) { 1890 SPDK_ERRLOG("Failed to inject error to Reference Tag.\n"); 1891 return rc; 1892 } 1893 } 1894 1895 if (inject_flags & SPDK_DIF_APPTAG_ERROR) { 1896 rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks, 1897 ctx->guard_interval + _dif_apptag_offset(ctx->dif_pi_format), 1898 _dif_apptag_size(), 1899 inject_offset); 1900 if (rc != 0) { 1901 SPDK_ERRLOG("Failed to inject error to Application Tag.\n"); 1902 return rc; 1903 } 1904 } 1905 1906 if (inject_flags & SPDK_DIF_GUARD_ERROR) { 1907 rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks, 1908 ctx->guard_interval, 1909 _dif_guard_size(ctx->dif_pi_format), 1910 inject_offset); 1911 if (rc != 0) { 1912 SPDK_ERRLOG("Failed to inject error to Guard.\n"); 1913 return rc; 1914 } 1915 } 1916 1917 if (inject_flags & SPDK_DIF_DATA_ERROR) { 1918 /* Note: Error injection to data block is expected to be detected 1919 * as guard error. 
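		 * Flipping a data bit changes the guard computed during verification, so spdk_dix_verify() reports it as SPDK_DIF_GUARD_ERROR.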
1920 */ 1921 rc = dif_inject_error(&data_sgl, ctx->block_size, num_blocks, 1922 0, 1923 ctx->block_size, 1924 inject_offset); 1925 if (rc != 0) { 1926 SPDK_ERRLOG("Failed to inject error to Guard.\n"); 1927 return rc; 1928 } 1929 } 1930 1931 return 0; 1932 } 1933 1934 static uint32_t 1935 _to_next_boundary(uint32_t offset, uint32_t boundary) 1936 { 1937 return boundary - (offset % boundary); 1938 } 1939 1940 static uint32_t 1941 _to_size_with_md(uint32_t size, uint32_t data_block_size, uint32_t block_size) 1942 { 1943 return (size / data_block_size) * block_size + (size % data_block_size); 1944 } 1945 1946 int 1947 spdk_dif_set_md_interleave_iovs(struct iovec *iovs, int iovcnt, 1948 struct iovec *buf_iovs, int buf_iovcnt, 1949 uint32_t data_offset, uint32_t data_len, 1950 uint32_t *_mapped_len, 1951 const struct spdk_dif_ctx *ctx) 1952 { 1953 uint32_t data_block_size, data_unalign, buf_len, buf_offset, len; 1954 struct _dif_sgl dif_sgl; 1955 struct _dif_sgl buf_sgl; 1956 1957 if (iovs == NULL || iovcnt == 0 || buf_iovs == NULL || buf_iovcnt == 0) { 1958 return -EINVAL; 1959 } 1960 1961 data_block_size = ctx->block_size - ctx->md_size; 1962 1963 data_unalign = ctx->data_offset % data_block_size; 1964 1965 buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size, 1966 ctx->block_size); 1967 buf_len -= data_unalign; 1968 1969 _dif_sgl_init(&dif_sgl, iovs, iovcnt); 1970 _dif_sgl_init(&buf_sgl, buf_iovs, buf_iovcnt); 1971 1972 if (!_dif_sgl_is_valid(&buf_sgl, buf_len)) { 1973 SPDK_ERRLOG("Buffer overflow will occur.\n"); 1974 return -ERANGE; 1975 } 1976 1977 buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size); 1978 buf_offset -= data_unalign; 1979 1980 _dif_sgl_advance(&buf_sgl, buf_offset); 1981 1982 while (data_len != 0) { 1983 len = spdk_min(data_len, _to_next_boundary(ctx->data_offset + data_offset, data_block_size)); 1984 if (!_dif_sgl_append_split(&dif_sgl, &buf_sgl, len)) { 1985 break; 1986 } 1987 _dif_sgl_advance(&buf_sgl, ctx->md_size); 1988 data_offset += len; 1989 data_len -= len; 1990 } 1991 1992 if (_mapped_len != NULL) { 1993 *_mapped_len = dif_sgl.total_size; 1994 } 1995 1996 return iovcnt - dif_sgl.iovcnt; 1997 } 1998 1999 static int 2000 _dif_sgl_setup_stream(struct _dif_sgl *sgl, uint32_t *_buf_offset, uint32_t *_buf_len, 2001 uint32_t data_offset, uint32_t data_len, 2002 const struct spdk_dif_ctx *ctx) 2003 { 2004 uint32_t data_block_size, data_unalign, buf_len, buf_offset; 2005 2006 data_block_size = ctx->block_size - ctx->md_size; 2007 2008 data_unalign = ctx->data_offset % data_block_size; 2009 2010 /* If the last data block is complete, DIF of the data block is 2011 * inserted or verified in this turn. 
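	 * Otherwise the running guard for the partial block is carried across calls in ctx->last_guard by the stream generate/verify APIs below.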
2012 */ 2013 buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size, 2014 ctx->block_size); 2015 buf_len -= data_unalign; 2016 2017 if (!_dif_sgl_is_valid(sgl, buf_len)) { 2018 return -ERANGE; 2019 } 2020 2021 buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size); 2022 buf_offset -= data_unalign; 2023 2024 _dif_sgl_advance(sgl, buf_offset); 2025 buf_len -= buf_offset; 2026 2027 buf_offset += data_unalign; 2028 2029 *_buf_offset = buf_offset; 2030 *_buf_len = buf_len; 2031 2032 return 0; 2033 } 2034 2035 int 2036 spdk_dif_generate_stream(struct iovec *iovs, int iovcnt, 2037 uint32_t data_offset, uint32_t data_len, 2038 struct spdk_dif_ctx *ctx) 2039 { 2040 uint32_t buf_len = 0, buf_offset = 0; 2041 uint32_t len, offset_in_block, offset_blocks; 2042 uint64_t guard = 0; 2043 struct _dif_sgl sgl; 2044 int rc; 2045 2046 if (iovs == NULL || iovcnt == 0) { 2047 return -EINVAL; 2048 } 2049 2050 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2051 guard = ctx->last_guard; 2052 } 2053 2054 _dif_sgl_init(&sgl, iovs, iovcnt); 2055 2056 rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx); 2057 if (rc != 0) { 2058 return rc; 2059 } 2060 2061 while (buf_len != 0) { 2062 len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size)); 2063 offset_in_block = buf_offset % ctx->block_size; 2064 offset_blocks = buf_offset / ctx->block_size; 2065 2066 guard = _dif_generate_split(&sgl, offset_in_block, len, guard, offset_blocks, ctx); 2067 2068 buf_len -= len; 2069 buf_offset += len; 2070 } 2071 2072 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2073 ctx->last_guard = guard; 2074 } 2075 2076 return 0; 2077 } 2078 2079 int 2080 spdk_dif_verify_stream(struct iovec *iovs, int iovcnt, 2081 uint32_t data_offset, uint32_t data_len, 2082 struct spdk_dif_ctx *ctx, 2083 struct spdk_dif_error *err_blk) 2084 { 2085 uint32_t buf_len = 0, buf_offset = 0; 2086 uint32_t len, offset_in_block, offset_blocks; 2087 uint64_t guard = 0; 2088 struct _dif_sgl sgl; 2089 int rc = 0; 2090 2091 if (iovs == NULL || iovcnt == 0) { 2092 return -EINVAL; 2093 } 2094 2095 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2096 guard = ctx->last_guard; 2097 } 2098 2099 _dif_sgl_init(&sgl, iovs, iovcnt); 2100 2101 rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx); 2102 if (rc != 0) { 2103 return rc; 2104 } 2105 2106 while (buf_len != 0) { 2107 len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size)); 2108 offset_in_block = buf_offset % ctx->block_size; 2109 offset_blocks = buf_offset / ctx->block_size; 2110 2111 rc = _dif_verify_split(&sgl, offset_in_block, len, &guard, offset_blocks, 2112 ctx, err_blk); 2113 if (rc != 0) { 2114 goto error; 2115 } 2116 2117 buf_len -= len; 2118 buf_offset += len; 2119 } 2120 2121 if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) { 2122 ctx->last_guard = guard; 2123 } 2124 error: 2125 return rc; 2126 } 2127 2128 int 2129 spdk_dif_update_crc32c_stream(struct iovec *iovs, int iovcnt, 2130 uint32_t data_offset, uint32_t data_len, 2131 uint32_t *_crc32c, const struct spdk_dif_ctx *ctx) 2132 { 2133 uint32_t buf_len = 0, buf_offset = 0, len, offset_in_block; 2134 uint32_t crc32c; 2135 struct _dif_sgl sgl; 2136 int rc; 2137 2138 if (iovs == NULL || iovcnt == 0) { 2139 return -EINVAL; 2140 } 2141 2142 crc32c = *_crc32c; 2143 _dif_sgl_init(&sgl, iovs, iovcnt); 2144 2145 rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx); 2146 if 
int
spdk_dif_verify_stream(struct iovec *iovs, int iovcnt,
		       uint32_t data_offset, uint32_t data_len,
		       struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk)
{
	uint32_t buf_len = 0, buf_offset = 0;
	uint32_t len, offset_in_block, offset_blocks;
	uint64_t guard = 0;
	struct _dif_sgl sgl;
	int rc = 0;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->last_guard;
	}

	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;
		offset_blocks = buf_offset / ctx->block_size;

		rc = _dif_verify_split(&sgl, offset_in_block, len, &guard, offset_blocks,
				       ctx, err_blk);
		if (rc != 0) {
			goto error;
		}

		buf_len -= len;
		buf_offset += len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		ctx->last_guard = guard;
	}
error:
	return rc;
}

int
spdk_dif_update_crc32c_stream(struct iovec *iovs, int iovcnt,
			      uint32_t data_offset, uint32_t data_len,
			      uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t buf_len = 0, buf_offset = 0, len, offset_in_block;
	uint32_t crc32c;
	struct _dif_sgl sgl;
	int rc;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	crc32c = *_crc32c;
	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;

		crc32c = _dif_update_crc32c_split(&sgl, offset_in_block, len, crc32c, ctx);

		buf_len -= len;
		buf_offset += len;
	}

	*_crc32c = crc32c;

	return 0;
}

void
spdk_dif_get_range_with_md(uint32_t data_offset, uint32_t data_len,
			   uint32_t *_buf_offset, uint32_t *_buf_len,
			   const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_offset, buf_len;

	if (!ctx->md_interleave) {
		buf_offset = data_offset;
		buf_len = data_len;
	} else {
		data_block_size = ctx->block_size - ctx->md_size;

		data_unalign = data_offset % data_block_size;

		buf_offset = _to_size_with_md(data_offset, data_block_size, ctx->block_size);
		buf_len = _to_size_with_md(data_unalign + data_len, data_block_size, ctx->block_size) -
			  data_unalign;
	}

	if (_buf_offset != NULL) {
		*_buf_offset = buf_offset;
	}

	if (_buf_len != NULL) {
		*_buf_len = buf_len;
	}
}

uint32_t
spdk_dif_get_length_with_md(uint32_t data_len, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size;

	if (!ctx->md_interleave) {
		return data_len;
	} else {
		data_block_size = ctx->block_size - ctx->md_size;

		return _to_size_with_md(data_len, data_block_size, ctx->block_size);
	}
}
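/* Illustrative usage sketch (hypothetical helper, not part of the library):
 * a worked example of the data-to-extended-LBA size conversion.  Assuming a
 * hypothetical format with 512-byte data blocks and 8 bytes of interleaved
 * metadata (ctx->block_size = 520, ctx->md_size = 8, ctx->md_interleave = true),
 * 4096 payload bytes span 8 full blocks, so spdk_dif_get_length_with_md()
 * returns 8 * 520 = 4160, while a trailing partial block is counted
 * byte-for-byte.  spdk_dif_get_range_with_md() applies the same conversion to
 * an (offset, length) pair.
 */
static inline uint32_t
example_buffer_len_for_payload(uint32_t data_len, const struct spdk_dif_ctx *ctx)
{
	/* With the 512+8 layout above, data_len = 4096 yields 4160. */
	return spdk_dif_get_length_with_md(data_len, ctx);
}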
static int
_dif_remap_ref_tag(struct _dif_sgl *sgl, uint32_t offset_blocks,
		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk,
		   bool check_ref_tag)
{
	uint32_t offset, buf_len;
	uint64_t expected = 0, remapped;
	uint8_t *buf;
	struct _dif_sgl tmp_sgl;
	struct spdk_dif dif;

	/* Fast forward to DIF field. */
	_dif_sgl_advance(sgl, ctx->guard_interval);
	_dif_sgl_copy(&tmp_sgl, sgl);

	/* Copy the split DIF field to the temporary DIF buffer */
	offset = 0;
	while (offset < _dif_size(ctx->dif_pi_format)) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset);

		memcpy((uint8_t *)&dif + offset, buf, buf_len);

		_dif_sgl_advance(sgl, buf_len);
		offset += buf_len;
	}

	if (_dif_ignore(&dif, ctx)) {
		goto end;
	}

	/* For type 1 and 2, the Reference Tag is incremented for each
	 * subsequent logical block. For type 3, the Reference Tag
	 * remains the same as the initial Reference Tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		remapped = ctx->remapped_init_ref_tag;
	}

	/* Verify the stored Reference Tag. */
	if (check_ref_tag && !_dif_reftag_check(&dif, ctx, expected, offset_blocks, err_blk)) {
		return -1;
	}

	/* Update the stored Reference Tag to the remapped one. */
	_dif_set_reftag(&dif, remapped, ctx->dif_pi_format);

	offset = 0;
	while (offset < _dif_size(ctx->dif_pi_format)) {
		_dif_sgl_get_buf(&tmp_sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset);

		memcpy(buf, (uint8_t *)&dif + offset, buf_len);

		_dif_sgl_advance(&tmp_sgl, buf_len);
		offset += buf_len;
	}

end:
	_dif_sgl_advance(sgl, ctx->block_size - ctx->guard_interval - _dif_size(ctx->dif_pi_format));

	return 0;
}

int
spdk_dif_remap_ref_tag(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk,
		       bool check_ref_tag)
{
	struct _dif_sgl sgl;
	uint32_t offset_blocks;
	int rc;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
		return 0;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_remap_ref_tag(&sgl, offset_blocks, ctx, err_blk, check_ref_tag);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}
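/* Illustrative usage sketch (hypothetical helper, not part of the library):
 * when an I/O is redirected so that it lands at a different starting LBA (for
 * example by a logical-volume or RAID layer), the Reference Tags stored in the
 * interleaved DIF fields must be rewritten for the new location.  This assumes
 * the remapped initial Reference Tag has already been stored in the context
 * (e.g. via spdk_dif_ctx_set_remapped_init_ref_tag()).
 */
static inline int
example_remap_after_redirect(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
			     const struct spdk_dif_ctx *ctx)
{
	struct spdk_dif_error err_blk;

	/* Check the existing tags while rewriting them to the remapped values. */
	return spdk_dif_remap_ref_tag(iovs, iovcnt, num_blocks, ctx, &err_blk, true);
}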
static int
_dix_remap_ref_tag(struct _dif_sgl *md_sgl, uint32_t offset_blocks,
		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk,
		   bool check_ref_tag)
{
	uint64_t expected = 0, remapped;
	uint8_t *md_buf;
	struct spdk_dif *dif;

	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

	dif = (struct spdk_dif *)(md_buf + ctx->guard_interval);

	if (_dif_ignore(dif, ctx)) {
		goto end;
	}

	/* For type 1 and 2, the Reference Tag is incremented for each
	 * subsequent logical block. For type 3, the Reference Tag
	 * remains the same as the initial Reference Tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		remapped = ctx->remapped_init_ref_tag;
	}

	/* Verify the stored Reference Tag. */
	if (check_ref_tag && !_dif_reftag_check(dif, ctx, expected, offset_blocks, err_blk)) {
		return -1;
	}

	/* Update the stored Reference Tag to the remapped one. */
	_dif_set_reftag(dif, remapped, ctx->dif_pi_format);

end:
	_dif_sgl_advance(md_sgl, ctx->md_size);

	return 0;
}

int
spdk_dix_remap_ref_tag(struct iovec *md_iov, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk,
		       bool check_ref_tag)
{
	struct _dif_sgl md_sgl;
	uint32_t offset_blocks;
	int rc;

	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of metadata iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
		return 0;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dix_remap_ref_tag(&md_sgl, offset_blocks, ctx, err_blk, check_ref_tag);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}
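/* Illustrative usage sketch (hypothetical helper, not part of the library):
 * the separate-metadata (DIX) counterpart of the remap above.  Only the
 * metadata iovec is rewritten; the data iovecs are not needed because the
 * Reference Tag lives entirely in the metadata buffer.
 */
static inline int
example_dix_remap_after_redirect(struct iovec *md_iov, uint32_t num_blocks,
				 const struct spdk_dif_ctx *ctx)
{
	struct spdk_dif_error err_blk;

	return spdk_dix_remap_ref_tag(md_iov, num_blocks, ctx, &err_blk, true);
}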