/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/dif.h"
#include "spdk/crc16.h"
#include "spdk/crc32.h"
#include "spdk/crc64.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/util.h"

#define REFTAG_MASK_16 0x00000000FFFFFFFF
#define REFTAG_MASK_32 0xFFFFFFFFFFFFFFFF
#define REFTAG_MASK_64 0x0000FFFFFFFFFFFF

/* The variable-size Storage Tag and Reference Tag are not supported yet,
 * so the maximum size of the Reference Tag is assumed.
 */
struct spdk_dif {
	union {
		struct {
			uint16_t guard;
			uint16_t app_tag;
			uint32_t stor_ref_space;
		} g16;
		struct {
			uint32_t guard;
			uint16_t app_tag;
			uint16_t stor_ref_space_p1;
			uint64_t stor_ref_space_p2;
		} g32;
		struct {
			uint64_t guard;
			uint16_t app_tag;
			uint16_t stor_ref_space_p1;
			uint32_t stor_ref_space_p2;
		} g64;
	};
};
SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g16) == 8, "Incorrect size");
SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g32) == 16, "Incorrect size");
SPDK_STATIC_ASSERT(SPDK_SIZEOF_MEMBER(struct spdk_dif, g64) == 16, "Incorrect size");

/* Context to iterate or create an iovec array.
 * Each SGL is either iterated or created at a time.
 */
struct _dif_sgl {
	/* Current iovec in the iteration or creation */
	struct iovec *iov;

	/* Remaining count of iovecs in the iteration or creation. */
	int iovcnt;

	/* Current offset in the iovec */
	uint32_t iov_offset;

	/* Size of the created iovec array in bytes */
	uint32_t total_size;
};

static inline void
_dif_sgl_init(struct _dif_sgl *s, struct iovec *iovs, int iovcnt)
{
	s->iov = iovs;
	s->iovcnt = iovcnt;
	s->iov_offset = 0;
	s->total_size = 0;
}

static void
_dif_sgl_advance(struct _dif_sgl *s, uint32_t step)
{
	s->iov_offset += step;
	while (s->iovcnt != 0) {
		if (s->iov_offset < s->iov->iov_len) {
			break;
		}

		s->iov_offset -= s->iov->iov_len;
		s->iov++;
		s->iovcnt--;
	}
}

static inline void
_dif_sgl_get_buf(struct _dif_sgl *s, uint8_t **_buf, uint32_t *_buf_len)
{
	if (_buf != NULL) {
		*_buf = (uint8_t *)s->iov->iov_base + s->iov_offset;
	}
	if (_buf_len != NULL) {
		*_buf_len = s->iov->iov_len - s->iov_offset;
	}
}

static inline bool
_dif_sgl_append(struct _dif_sgl *s, uint8_t *data, uint32_t data_len)
{
	assert(s->iovcnt > 0);
	s->iov->iov_base = data;
	s->iov->iov_len = data_len;
	s->total_size += data_len;
	s->iov++;
	s->iovcnt--;

	if (s->iovcnt > 0) {
		return true;
	} else {
		return false;
	}
}

static inline bool
_dif_sgl_append_split(struct _dif_sgl *dst, struct _dif_sgl *src, uint32_t data_len)
{
	uint8_t *buf;
	uint32_t buf_len;

	while (data_len != 0) {
		_dif_sgl_get_buf(src, &buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);

		if (!_dif_sgl_append(dst, buf, buf_len)) {
			return false;
		}

		_dif_sgl_advance(src, buf_len);
		data_len -= buf_len;
	}

	return true;
}

/* This function must be used before starting iteration. */
static bool
_dif_sgl_is_bytes_multiple(struct _dif_sgl *s, uint32_t bytes)
{
	int i;

	for (i = 0; i < s->iovcnt; i++) {
		if (s->iov[i].iov_len % bytes) {
			return false;
		}
	}

	return true;
}

static bool
_dif_sgl_is_valid_block_aligned(struct _dif_sgl *s, uint32_t num_blocks, uint32_t block_size)
{
	uint32_t count = 0;
	int i;

	for (i = 0; i < s->iovcnt; i++) {
		if (s->iov[i].iov_len % block_size) {
			return false;
		}
		count += s->iov[i].iov_len / block_size;
	}

	return count >= num_blocks;
}

/* This function must be used before starting iteration. */
static bool
_dif_sgl_is_valid(struct _dif_sgl *s, uint32_t bytes)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < s->iovcnt; i++) {
		total += s->iov[i].iov_len;
	}

	return total >= bytes;
}

static void
_dif_sgl_copy(struct _dif_sgl *to, struct _dif_sgl *from)
{
	memcpy(to, from, sizeof(struct _dif_sgl));
}

static bool
_dif_is_disabled(enum spdk_dif_type dif_type)
{
	if (dif_type == SPDK_DIF_DISABLE) {
		return true;
	} else {
		return false;
	}
}

static inline size_t
_dif_size(enum spdk_dif_pi_format dif_pi_format)
{
	uint8_t size;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32);
	} else {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64);
	}

	return size;
}

static uint32_t
_get_guard_interval(uint32_t block_size, uint32_t md_size, bool dif_loc, bool md_interleave,
		    size_t dif_size)
{
	if (!dif_loc) {
		/* For metadata formats with more than 8/16 bytes (depending on
		 * the PI format), if the DIF is contained in the last 8/16 bytes
		 * of metadata, then the CRC covers all metadata up to but excluding
		 * these last 8/16 bytes.
		 */
		if (md_interleave) {
			return block_size - dif_size;
		} else {
			return md_size - dif_size;
		}
	} else {
		/* For metadata formats with more than 8/16 bytes (depending on
		 * the PI format), if the DIF is contained in the first 8/16 bytes
		 * of metadata, then the CRC does not cover any metadata.
		 */
		if (md_interleave) {
			return block_size - md_size;
		} else {
			return 0;
		}
	}
}
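/* Illustrative example (not part of the library code): for a 512-byte data block
 * interleaved with 8 bytes of metadata (block_size = 520, md_size = 8) and
 * dif_loc = false, the 8-byte DIF sits in the last 8 bytes of the block, so
 * _get_guard_interval(520, 8, false, true, 8) returns 520 - 8 = 512 and the
 * guard CRC covers only the 512 data bytes. With a larger metadata area, e.g.
 * block_size = 4160 and md_size = 64, the same call returns 4152, so the CRC
 * also covers the first 56 metadata bytes.
 */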
static inline uint8_t
_dif_guard_size(enum spdk_dif_pi_format dif_pi_format)
{
	uint8_t size;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.guard);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.guard);
	} else {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.guard);
	}

	return size;
}

static inline void
_dif_set_guard(struct spdk_dif *dif, uint64_t guard, enum spdk_dif_pi_format dif_pi_format)
{
	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		to_be16(&(dif->g16.guard), (uint16_t)guard);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		to_be32(&(dif->g32.guard), (uint32_t)guard);
	} else {
		to_be64(&(dif->g64.guard), guard);
	}
}

static inline uint64_t
_dif_get_guard(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format)
{
	uint64_t guard;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		guard = (uint64_t)from_be16(&(dif->g16.guard));
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		guard = (uint64_t)from_be32(&(dif->g32.guard));
	} else {
		guard = from_be64(&(dif->g64.guard));
	}

	return guard;
}

static inline uint64_t
_dif_generate_guard(uint64_t guard_seed, void *buf, size_t buf_len,
		    enum spdk_dif_pi_format dif_pi_format)
{
	uint64_t guard;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		guard = (uint64_t)spdk_crc16_t10dif((uint16_t)guard_seed, buf, buf_len);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		guard = (uint64_t)spdk_crc32c_nvme(buf, buf_len, guard_seed);
	} else {
		guard = spdk_crc64_nvme(buf, buf_len, guard_seed);
	}

	return guard;
}

static inline uint64_t
_dif_generate_guard_copy(uint64_t guard_seed, void *dst, void *src, size_t buf_len,
			 enum spdk_dif_pi_format dif_pi_format)
{
	uint64_t guard;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		guard = (uint64_t)spdk_crc16_t10dif_copy((uint16_t)guard_seed, dst, src, buf_len);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		memcpy(dst, src, buf_len);
		guard = (uint64_t)spdk_crc32c_nvme(src, buf_len, guard_seed);
	} else {
		memcpy(dst, src, buf_len);
		guard = spdk_crc64_nvme(src, buf_len, guard_seed);
	}

	return guard;
}

static inline uint8_t
_dif_apptag_offset(enum spdk_dif_pi_format dif_pi_format)
{
	return _dif_guard_size(dif_pi_format);
}

static inline uint8_t
_dif_apptag_size(void)
{
	return SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.app_tag);
}

static inline void
_dif_set_apptag(struct spdk_dif *dif, uint16_t app_tag, enum spdk_dif_pi_format dif_pi_format)
{
	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		to_be16(&(dif->g16.app_tag), app_tag);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		to_be16(&(dif->g32.app_tag), app_tag);
	} else {
		to_be16(&(dif->g64.app_tag), app_tag);
	}
}

static inline uint16_t
_dif_get_apptag(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format)
{
	uint16_t app_tag;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		app_tag = from_be16(&(dif->g16.app_tag));
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		app_tag = from_be16(&(dif->g32.app_tag));
	} else {
		app_tag = from_be16(&(dif->g64.app_tag));
	}

	return app_tag;
}
static inline bool
_dif_apptag_ignore(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format)
{
	return _dif_get_apptag(dif, dif_pi_format) == SPDK_DIF_APPTAG_IGNORE;
}

static inline uint8_t
_dif_reftag_offset(enum spdk_dif_pi_format dif_pi_format)
{
	uint8_t offset;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size();
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size()
			 + SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.stor_ref_space_p1);
	} else {
		offset = _dif_apptag_offset(dif_pi_format) + _dif_apptag_size();
	}

	return offset;
}

static inline uint8_t
_dif_reftag_size(enum spdk_dif_pi_format dif_pi_format)
{
	uint8_t size;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g16.stor_ref_space);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g32.stor_ref_space_p2);
	} else {
		size = SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.stor_ref_space_p1) +
		       SPDK_SIZEOF_MEMBER(struct spdk_dif, g64.stor_ref_space_p2);
	}

	return size;
}

static inline void
_dif_set_reftag(struct spdk_dif *dif, uint64_t ref_tag, enum spdk_dif_pi_format dif_pi_format)
{
	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		to_be32(&(dif->g16.stor_ref_space), (uint32_t)ref_tag);
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		to_be64(&(dif->g32.stor_ref_space_p2), ref_tag);
	} else {
		to_be16(&(dif->g64.stor_ref_space_p1), (uint16_t)(ref_tag >> 32));
		to_be32(&(dif->g64.stor_ref_space_p2), (uint32_t)ref_tag);
	}
}

static inline uint64_t
_dif_get_reftag(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format)
{
	uint64_t ref_tag;

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		ref_tag = (uint64_t)from_be32(&(dif->g16.stor_ref_space));
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		ref_tag = from_be64(&(dif->g32.stor_ref_space_p2));
	} else {
		ref_tag = (uint64_t)from_be16(&(dif->g64.stor_ref_space_p1));
		ref_tag <<= 32;
		ref_tag |= (uint64_t)from_be32(&(dif->g64.stor_ref_space_p2));
	}

	return ref_tag;
}

static inline bool
_dif_reftag_match(struct spdk_dif *dif, uint64_t ref_tag,
		  enum spdk_dif_pi_format dif_pi_format)
{
	uint64_t _ref_tag;
	bool match;

	_ref_tag = _dif_get_reftag(dif, dif_pi_format);

	if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
		match = (_ref_tag == (ref_tag & REFTAG_MASK_16));
	} else if (dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
		match = (_ref_tag == ref_tag);
	} else {
		match = (_ref_tag == (ref_tag & REFTAG_MASK_64));
	}

	return match;
}

static inline bool
_dif_reftag_ignore(struct spdk_dif *dif, enum spdk_dif_pi_format dif_pi_format)
{
	return _dif_reftag_match(dif, REFTAG_MASK_32, dif_pi_format);
}
int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint64_t guard_seed, struct spdk_dif_ctx_init_ext_opts *opts)
{
	uint32_t data_block_size;
	enum spdk_dif_pi_format dif_pi_format = SPDK_DIF_PI_FORMAT_16;

	if (opts != NULL) {
		if (opts->dif_pi_format != SPDK_DIF_PI_FORMAT_16 &&
		    opts->dif_pi_format != SPDK_DIF_PI_FORMAT_32 &&
		    opts->dif_pi_format != SPDK_DIF_PI_FORMAT_64) {
			SPDK_ERRLOG("No valid DIF PI format provided.\n");
			return -EINVAL;
		}

		dif_pi_format = opts->dif_pi_format;
	}

	if (md_size < _dif_size(dif_pi_format)) {
		SPDK_ERRLOG("Metadata size is smaller than DIF size.\n");
		return -EINVAL;
	}

	if (md_interleave) {
		if (block_size < md_size) {
			SPDK_ERRLOG("Block size is smaller than metadata size.\n");
			return -EINVAL;
		}
		data_block_size = block_size - md_size;
	} else {
		if (dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
			if (block_size == 0 || (block_size % 512) != 0) {
				SPDK_ERRLOG("Block size must be non-zero and a multiple of 512B.\n");
				return -EINVAL;
			}
		} else {
			if (block_size == 0 || (block_size % 4096) != 0) {
				SPDK_ERRLOG("Block size must be non-zero and a multiple of 4kB.\n");
				return -EINVAL;
			}
		}

		data_block_size = block_size;
	}

	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->md_interleave = md_interleave;
	ctx->dif_pi_format = dif_pi_format;
	ctx->guard_interval = _get_guard_interval(block_size, md_size, dif_loc, md_interleave,
			      _dif_size(ctx->dif_pi_format));
	ctx->dif_type = dif_type;
	ctx->dif_flags = dif_flags;
	ctx->init_ref_tag = init_ref_tag;
	ctx->apptag_mask = apptag_mask;
	ctx->app_tag = app_tag;
	ctx->data_offset = data_offset;
	ctx->ref_tag_offset = data_offset / data_block_size;
	ctx->last_guard = guard_seed;
	ctx->guard_seed = guard_seed;
	ctx->remapped_init_ref_tag = 0;

	return 0;
}
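/* A minimal usage sketch (illustrative only, not part of this file): initialize
 * a context for 512-byte data blocks interleaved with 8 bytes of metadata,
 * Type 1 protection, 16-bit guard PI format, with all three checks enabled.
 * The starting LBA (100 here) seeds the Reference Tag; other option members
 * (if any) are left at their defaults.
 *
 *	struct spdk_dif_ctx ctx;
 *	struct spdk_dif_ctx_init_ext_opts dif_opts = {
 *		.dif_pi_format = SPDK_DIF_PI_FORMAT_16,
 *	};
 *	int rc;
 *
 *	rc = spdk_dif_ctx_init(&ctx, 520, 8, true, false, SPDK_DIF_TYPE1,
 *			       SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
 *			       SPDK_DIF_FLAGS_REFTAG_CHECK,
 *			       100, 0xFFFF, 0x1234, 0, 0, &dif_opts);
 *	assert(rc == 0);
 */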
void
spdk_dif_ctx_set_data_offset(struct spdk_dif_ctx *ctx, uint32_t data_offset)
{
	uint32_t data_block_size;

	if (ctx->md_interleave) {
		data_block_size = ctx->block_size - ctx->md_size;
	} else {
		data_block_size = ctx->block_size;
	}

	ctx->data_offset = data_offset;
	ctx->ref_tag_offset = data_offset / data_block_size;
}

void
spdk_dif_ctx_set_remapped_init_ref_tag(struct spdk_dif_ctx *ctx,
				       uint32_t remapped_init_ref_tag)
{
	ctx->remapped_init_ref_tag = remapped_init_ref_tag;
}

static void
_dif_generate(void *_dif, uint64_t guard, uint32_t offset_blocks,
	      const struct spdk_dif_ctx *ctx)
{
	struct spdk_dif *dif = _dif;
	uint64_t ref_tag;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		_dif_set_guard(dif, guard, ctx->dif_pi_format);
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
		_dif_set_apptag(dif, ctx->app_tag, ctx->dif_pi_format);
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
		/* For type 1 and 2, the reference tag is incremented for each
		 * subsequent logical block. For type 3, the reference tag
		 * remains the same as the initial reference tag.
		 */
		if (ctx->dif_type != SPDK_DIF_TYPE3) {
			ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		} else {
			ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset;
		}

		/* Overwrite reference tag if initialization reference tag is SPDK_DIF_REFTAG_IGNORE */
		if (ctx->init_ref_tag == SPDK_DIF_REFTAG_IGNORE) {
			if (ctx->dif_pi_format == SPDK_DIF_PI_FORMAT_16) {
				ref_tag = REFTAG_MASK_16;
			} else if (ctx->dif_pi_format == SPDK_DIF_PI_FORMAT_32) {
				ref_tag = REFTAG_MASK_32;
			} else {
				ref_tag = REFTAG_MASK_64;
			}
		}

		_dif_set_reftag(dif, ref_tag, ctx->dif_pi_format);
	}
}

static void
dif_generate(struct _dif_sgl *sgl, uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks = 0;
	uint8_t *buf;
	uint64_t guard = 0;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(sgl, &buf, NULL);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(ctx->guard_seed, buf, ctx->guard_interval, ctx->dif_pi_format);
		}

		_dif_generate(buf + ctx->guard_interval, guard, offset_blocks, ctx);

		_dif_sgl_advance(sgl, ctx->block_size);
		offset_blocks++;
	}
}

static uint64_t
_dif_generate_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
		    uint64_t guard, uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_in_dif, buf_len;
	uint8_t *buf;
	struct spdk_dif dif = {};

	assert(offset_in_block < ctx->guard_interval);
	assert(offset_in_block + data_len < ctx->guard_interval ||
	       offset_in_block + data_len == ctx->block_size);

	/* Compute CRC over split logical block data. */
	while (data_len != 0 && offset_in_block < ctx->guard_interval) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);
		buf_len = spdk_min(buf_len, ctx->guard_interval - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(guard, buf, buf_len, ctx->dif_pi_format);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
		data_len -= buf_len;
	}

	if (offset_in_block < ctx->guard_interval) {
		return guard;
	}

	/* If a whole logical block data is parsed, generate DIF
	 * and save it to the temporary DIF area.
	 */
	_dif_generate(&dif, guard, offset_blocks, ctx);

	/* Copy generated DIF field to the split DIF field, and then
	 * skip metadata field after DIF field (if any).
	 */
	while (offset_in_block < ctx->block_size) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);

		if (offset_in_block < ctx->guard_interval + _dif_size(ctx->dif_pi_format)) {
			offset_in_dif = offset_in_block - ctx->guard_interval;
			buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset_in_dif);

			memcpy(buf, ((uint8_t *)&dif) + offset_in_dif, buf_len);
		} else {
			buf_len = spdk_min(buf_len, ctx->block_size - offset_in_block);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	return guard;
}

static void
dif_generate_split(struct _dif_sgl *sgl, uint32_t num_blocks,
		   const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;
	uint64_t guard = 0;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_generate_split(sgl, 0, ctx->block_size, guard, offset_blocks, ctx);
	}
}

int
spdk_dif_generate(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		  const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl sgl;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
		dif_generate(&sgl, num_blocks, ctx);
	} else {
		dif_generate_split(&sgl, num_blocks, ctx);
	}

	return 0;
}
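/* Illustrative sketch (not part of this file): generate DIF in place for a
 * buffer of four 520-byte blocks (512B data + 8B metadata interleaved), using
 * a context set up as in the earlier sketch.
 *
 *	uint8_t buf[4 * 520];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	int rc;
 *
 *	... fill the 512-byte data portion of each block ...
 *
 *	rc = spdk_dif_generate(&iov, 1, 4, &ctx);
 *	assert(rc == 0);
 */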
static void
_dif_error_set(struct spdk_dif_error *err_blk, uint8_t err_type,
	       uint64_t expected, uint64_t actual, uint32_t err_offset)
{
	if (err_blk) {
		err_blk->err_type = err_type;
		err_blk->expected = expected;
		err_blk->actual = actual;
		err_blk->err_offset = err_offset;
	}
}

static int
_dif_verify(void *_dif, uint64_t guard, uint32_t offset_blocks,
	    const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct spdk_dif *dif = _dif;
	uint64_t _guard;
	uint16_t _app_tag;
	uint64_t ref_tag, _ref_tag;

	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* If Type 1 or 2 is used, then all DIF checks are disabled when
		 * the Application Tag is 0xFFFF.
		 */
		if (_dif_apptag_ignore(dif, ctx->dif_pi_format)) {
			return 0;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* If Type 3 is used, then all DIF checks are disabled when the
		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF
		 * or 0xFFFFFFFFFFFFFFFF depending on the PI format.
		 */
		if (_dif_apptag_ignore(dif, ctx->dif_pi_format) &&
		    _dif_reftag_ignore(dif, ctx->dif_pi_format)) {
			return 0;
		}
		break;
	default:
		break;
	}

	/* For type 1 and 2, the reference tag is incremented for each
	 * subsequent logical block. For type 3, the reference tag
	 * remains the same as the initial reference tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		/* Compare the DIF Guard field to the CRC computed over the logical
		 * block data.
		 */
		_guard = _dif_get_guard(dif, ctx->dif_pi_format);
		if (_guard != guard) {
			_dif_error_set(err_blk, SPDK_DIF_GUARD_ERROR, _guard, guard,
				       offset_blocks);
			SPDK_ERRLOG("Failed to compare Guard: LBA=%" PRIu64 "," \
				    " Expected=%lx, Actual=%lx\n",
				    ref_tag, _guard, guard);
			return -1;
		}
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
		/* Compare unmasked bits in the DIF Application Tag field to the
		 * passed Application Tag.
		 */
		_app_tag = _dif_get_apptag(dif, ctx->dif_pi_format);
		if ((_app_tag & ctx->apptag_mask) != (ctx->app_tag & ctx->apptag_mask)) {
			_dif_error_set(err_blk, SPDK_DIF_APPTAG_ERROR, ctx->app_tag,
				       (_app_tag & ctx->apptag_mask), offset_blocks);
			SPDK_ERRLOG("Failed to compare App Tag: LBA=%" PRIu64 "," \
				    " Expected=%x, Actual=%x\n",
				    ref_tag, ctx->app_tag, (_app_tag & ctx->apptag_mask));
			return -1;
		}
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
		switch (ctx->dif_type) {
		case SPDK_DIF_TYPE1:
		case SPDK_DIF_TYPE2:
			/* Compare the DIF Reference Tag field to the passed Reference Tag.
			 * The passed Reference Tag will be the least significant 4 bytes
			 * or 8 bytes (depending on the PI format)
			 * of the LBA when Type 1 is used, and application specific value
			 * if Type 2 is used.
			 */
			if (!_dif_reftag_match(dif, ref_tag, ctx->dif_pi_format)) {
				_ref_tag = _dif_get_reftag(dif, ctx->dif_pi_format);
				_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, ref_tag,
					       _ref_tag, offset_blocks);
				SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu64 "," \
					    " Expected=%lx, Actual=%lx\n",
					    ref_tag, ref_tag, _ref_tag);
				return -1;
			}
			break;
		case SPDK_DIF_TYPE3:
			/* For Type 3, computed Reference Tag remains unchanged.
			 * Hence ignore the Reference Tag field.
			 */
			break;
		default:
			break;
		}
	}

	return 0;
}

static int
dif_verify(struct _dif_sgl *sgl, uint32_t num_blocks,
	   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks = 0;
	int rc;
	uint8_t *buf;
	uint64_t guard = 0;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(sgl, &buf, NULL);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(ctx->guard_seed, buf, ctx->guard_interval, ctx->dif_pi_format);
		}

		rc = _dif_verify(buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_dif_sgl_advance(sgl, ctx->block_size);
		offset_blocks++;
	}

	return 0;
}

static int
_dif_verify_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
		  uint64_t *_guard, uint32_t offset_blocks,
		  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset_in_dif, buf_len;
	uint8_t *buf;
	uint64_t guard;
	struct spdk_dif dif = {};
	int rc;

	assert(_guard != NULL);
	assert(offset_in_block < ctx->guard_interval);
	assert(offset_in_block + data_len < ctx->guard_interval ||
	       offset_in_block + data_len == ctx->block_size);

	guard = *_guard;

	/* Compute CRC over split logical block data. */
	while (data_len != 0 && offset_in_block < ctx->guard_interval) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);
		buf_len = spdk_min(buf_len, ctx->guard_interval - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(guard, buf, buf_len, ctx->dif_pi_format);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
		data_len -= buf_len;
	}

	if (offset_in_block < ctx->guard_interval) {
		*_guard = guard;
		return 0;
	}

	/* Copy the split DIF field to the temporary DIF buffer, and then
	 * skip metadata field after DIF field (if any). */
	while (offset_in_block < ctx->block_size) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);

		if (offset_in_block < ctx->guard_interval + _dif_size(ctx->dif_pi_format)) {
			offset_in_dif = offset_in_block - ctx->guard_interval;
			buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset_in_dif);

			memcpy((uint8_t *)&dif + offset_in_dif, buf, buf_len);
		} else {
			buf_len = spdk_min(buf_len, ctx->block_size - offset_in_block);
		}
		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
	}

	rc = _dif_verify(&dif, guard, offset_blocks, ctx, err_blk);
	if (rc != 0) {
		return rc;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	*_guard = guard;
	return 0;
}

static int
dif_verify_split(struct _dif_sgl *sgl, uint32_t num_blocks,
		 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	uint64_t guard = 0;
	int rc;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_verify_split(sgl, 0, ctx->block_size, &guard, offset_blocks,
				       ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

int
spdk_dif_verify(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct _dif_sgl sgl;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
		return dif_verify(&sgl, num_blocks, ctx, err_blk);
	} else {
		return dif_verify_split(&sgl, num_blocks, ctx, err_blk);
	}
}
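/* Illustrative sketch (not part of this file): verify the buffer generated in
 * the previous sketch and inspect the error report on failure.
 *
 *	struct spdk_dif_error err_blk;
 *	int rc;
 *
 *	rc = spdk_dif_verify(&iov, 1, 4, &ctx, &err_blk);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("DIF error type=%u at block %" PRIu32 "\n",
 *			    err_blk.err_type, err_blk.err_offset);
 *	}
 */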
static uint32_t
dif_update_crc32c(struct _dif_sgl *sgl, uint32_t num_blocks,
		  uint32_t crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;
	uint8_t *buf;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_sgl_get_buf(sgl, &buf, NULL);

		crc32c = spdk_crc32c_update(buf, ctx->block_size - ctx->md_size, crc32c);

		_dif_sgl_advance(sgl, ctx->block_size);
	}

	return crc32c;
}

static uint32_t
_dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
			 uint32_t crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, buf_len;
	uint8_t *buf;

	data_block_size = ctx->block_size - ctx->md_size;

	assert(offset_in_block + data_len <= ctx->block_size);

	while (data_len != 0) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, data_len);

		if (offset_in_block < data_block_size) {
			buf_len = spdk_min(buf_len, data_block_size - offset_in_block);
			crc32c = spdk_crc32c_update(buf, buf_len, crc32c);
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
		data_len -= buf_len;
	}

	return crc32c;
}

static uint32_t
dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t num_blocks,
			uint32_t crc32c, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		crc32c = _dif_update_crc32c_split(sgl, 0, ctx->block_size, crc32c, ctx);
	}

	return crc32c;
}

int
spdk_dif_update_crc32c(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		       uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl sgl;

	if (_crc32c == NULL) {
		return -EINVAL;
	}

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
		*_crc32c = dif_update_crc32c(&sgl, num_blocks, *_crc32c, ctx);
	} else {
		*_crc32c = dif_update_crc32c_split(&sgl, num_blocks, *_crc32c, ctx);
	}

	return 0;
}
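/* Illustrative sketch (not part of this file): accumulate a CRC-32C over only
 * the data portions of the interleaved buffer, skipping each block's metadata.
 * The caller owns the seed value and may chain multiple calls.
 *
 *	uint32_t crc32c = 0;
 *	int rc;
 *
 *	rc = spdk_dif_update_crc32c(&iov, 1, 4, &crc32c, &ctx);
 *	assert(rc == 0);
 */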
static void
dif_generate_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks = 0, data_block_size;
	uint8_t *src, *dst;
	uint64_t guard;

	data_block_size = ctx->block_size - ctx->md_size;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(src_sgl, &src, NULL);
		_dif_sgl_get_buf(dst_sgl, &dst, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard_copy(ctx->guard_seed, dst, src, data_block_size,
							 ctx->dif_pi_format);
			guard = _dif_generate_guard(guard, dst + data_block_size,
						    ctx->guard_interval - data_block_size, ctx->dif_pi_format);
		} else {
			memcpy(dst, src, data_block_size);
		}

		_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);

		_dif_sgl_advance(src_sgl, data_block_size);
		_dif_sgl_advance(dst_sgl, ctx->block_size);
		offset_blocks++;
	}
}

static void
_dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
			 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_in_block, src_len, data_block_size;
	uint8_t *src, *dst;
	uint64_t guard = 0;

	_dif_sgl_get_buf(dst_sgl, &dst, NULL);

	data_block_size = ctx->block_size - ctx->md_size;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < data_block_size) {
		/* Compute CRC over split logical block data and copy
		 * data to bounce buffer.
		 */
		_dif_sgl_get_buf(src_sgl, &src, &src_len);
		src_len = spdk_min(src_len, data_block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard_copy(guard, dst + offset_in_block,
							 src, src_len, ctx->dif_pi_format);
		} else {
			memcpy(dst + offset_in_block, src, src_len);
		}

		_dif_sgl_advance(src_sgl, src_len);
		offset_in_block += src_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = _dif_generate_guard(guard, dst + data_block_size,
					    ctx->guard_interval - data_block_size, ctx->dif_pi_format);
	}

	_dif_sgl_advance(dst_sgl, ctx->block_size);

	_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);
}

static void
dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
			uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dif_generate_copy_split(src_sgl, dst_sgl, offset_blocks, ctx);
	}
}

int
spdk_dif_generate_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs,
		       int bounce_iovcnt, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl src_sgl, dst_sgl;
	uint32_t data_block_size;

	_dif_sgl_init(&src_sgl, iovs, iovcnt);
	_dif_sgl_init(&dst_sgl, bounce_iovs, bounce_iovcnt);

	data_block_size = ctx->block_size - ctx->md_size;

	if (!_dif_sgl_is_valid(&src_sgl, data_block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (!_dif_sgl_is_valid_block_aligned(&dst_sgl, num_blocks, ctx->block_size)) {
		SPDK_ERRLOG("Size of bounce_iovs array is not valid or is misaligned with block_size.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&src_sgl, data_block_size)) {
		dif_generate_copy(&src_sgl, &dst_sgl, num_blocks, ctx);
	} else {
		dif_generate_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx);
	}

	return 0;
}

static int
dif_verify_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks = 0, data_block_size;
	uint8_t *src, *dst;
	int rc;
	uint64_t guard;

	data_block_size = ctx->block_size - ctx->md_size;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(src_sgl, &src, NULL);
		_dif_sgl_get_buf(dst_sgl, &dst, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard_copy(ctx->guard_seed, dst, src, data_block_size,
							 ctx->dif_pi_format);
			guard = _dif_generate_guard(guard, src + data_block_size,
						    ctx->guard_interval - data_block_size, ctx->dif_pi_format);
		} else {
			memcpy(dst, src, data_block_size);
		}

		rc = _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_dif_sgl_advance(src_sgl, ctx->block_size);
		_dif_sgl_advance(dst_sgl, data_block_size);
		offset_blocks++;
	}

	return 0;
}
static int
_dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		       uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk)
{
	uint32_t offset_in_block, dst_len, data_block_size;
	uint8_t *src, *dst;
	uint64_t guard = 0;

	_dif_sgl_get_buf(src_sgl, &src, NULL);

	data_block_size = ctx->block_size - ctx->md_size;

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < data_block_size) {
		/* Compute CRC over split logical block data and copy
		 * data to bounce buffer.
		 */
		_dif_sgl_get_buf(dst_sgl, &dst, &dst_len);
		dst_len = spdk_min(dst_len, data_block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard_copy(guard, dst, src + offset_in_block,
							 dst_len, ctx->dif_pi_format);
		} else {
			memcpy(dst, src + offset_in_block, dst_len);
		}

		_dif_sgl_advance(dst_sgl, dst_len);
		offset_in_block += dst_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = _dif_generate_guard(guard, src + data_block_size,
					    ctx->guard_interval - data_block_size, ctx->dif_pi_format);
	}

	_dif_sgl_advance(src_sgl, ctx->block_size);

	return _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
}

static int
dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		      struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	int rc;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_verify_copy_split(src_sgl, dst_sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

int
spdk_dif_verify_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iovs,
		     int bounce_iovcnt, uint32_t num_blocks,
		     const struct spdk_dif_ctx *ctx,
		     struct spdk_dif_error *err_blk)
{
	struct _dif_sgl src_sgl, dst_sgl;
	uint32_t data_block_size;

	_dif_sgl_init(&src_sgl, bounce_iovs, bounce_iovcnt);
	_dif_sgl_init(&dst_sgl, iovs, iovcnt);

	data_block_size = ctx->block_size - ctx->md_size;

	if (!_dif_sgl_is_valid(&dst_sgl, data_block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (!_dif_sgl_is_valid_block_aligned(&src_sgl, num_blocks, ctx->block_size)) {
		SPDK_ERRLOG("Size of bounce_iovs array is not valid or is misaligned with block_size.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&dst_sgl, data_block_size)) {
		return dif_verify_copy(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk);
	} else {
		return dif_verify_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk);
	}
}
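/* Illustrative sketch (not part of this file): DIF insert and strip through a
 * bounce buffer. The application works with 512-byte data blocks while the
 * bounce buffer holds the interleaved 520-byte blocks.
 *
 *	uint8_t data[4 * 512], bounce[4 * 520];
 *	struct iovec data_iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct iovec bounce_iov = { .iov_base = bounce, .iov_len = sizeof(bounce) };
 *	struct spdk_dif_error err_blk;
 *	int rc;
 *
 *	rc = spdk_dif_generate_copy(&data_iov, 1, &bounce_iov, 1, 4, &ctx);
 *	assert(rc == 0);
 *
 *	rc = spdk_dif_verify_copy(&data_iov, 1, &bounce_iov, 1, 4, &ctx, &err_blk);
 *	assert(rc == 0);
 */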
static void
_bit_flip(uint8_t *buf, uint32_t flip_bit)
{
	uint8_t byte;

	byte = *buf;
	byte ^= 1 << flip_bit;
	*buf = byte;
}

static int
_dif_inject_error(struct _dif_sgl *sgl,
		  uint32_t block_size, uint32_t num_blocks,
		  uint32_t inject_offset_blocks,
		  uint32_t inject_offset_bytes,
		  uint32_t inject_offset_bits)
{
	uint32_t offset_in_block, buf_len;
	uint8_t *buf;

	_dif_sgl_advance(sgl, block_size * inject_offset_blocks);

	offset_in_block = 0;

	while (offset_in_block < block_size) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, block_size - offset_in_block);

		if (inject_offset_bytes >= offset_in_block &&
		    inject_offset_bytes < offset_in_block + buf_len) {
			buf += inject_offset_bytes - offset_in_block;
			_bit_flip(buf, inject_offset_bits);
			return 0;
		}

		_dif_sgl_advance(sgl, buf_len);
		offset_in_block += buf_len;
	}

	return -1;
}

static int
dif_inject_error(struct _dif_sgl *sgl, uint32_t block_size, uint32_t num_blocks,
		 uint32_t start_inject_bytes, uint32_t inject_range_bytes,
		 uint32_t *inject_offset)
{
	uint32_t inject_offset_blocks, inject_offset_bytes, inject_offset_bits;
	uint32_t offset_blocks;
	int rc;

	srand(time(0));

	inject_offset_blocks = rand() % num_blocks;
	inject_offset_bytes = start_inject_bytes + (rand() % inject_range_bytes);
	inject_offset_bits = rand() % 8;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		if (offset_blocks == inject_offset_blocks) {
			rc = _dif_inject_error(sgl, block_size, num_blocks,
					       inject_offset_blocks,
					       inject_offset_bytes,
					       inject_offset_bits);
			if (rc == 0) {
				*inject_offset = inject_offset_blocks;
			}
			return rc;
		}
	}

	return -1;
}

int
spdk_dif_inject_error(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		      const struct spdk_dif_ctx *ctx, uint32_t inject_flags,
		      uint32_t *inject_offset)
{
	struct _dif_sgl sgl;
	int rc;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      ctx->guard_interval + _dif_reftag_offset(ctx->dif_pi_format),
				      _dif_reftag_size(ctx->dif_pi_format),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      ctx->guard_interval + _dif_apptag_offset(ctx->dif_pi_format),
				      _dif_apptag_size(),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      ctx->guard_interval,
				      _dif_guard_size(ctx->dif_pi_format),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Guard.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_DATA_ERROR) {
		/* If the DIF information is contained within the last 8/16 bytes of
		 * metadata (depending on the PI format), then the CRC covers all metadata
		 * bytes up to but excluding the last 8/16 bytes. But error injection does not
		 * cover these metadata because classification is not determined yet.
		 *
		 * Note: Error injection to data block is expected to be detected as
		 * guard error.
		 */
		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
				      0,
				      ctx->block_size - ctx->md_size,
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to data block.\n");
			return rc;
		}
	}

	return 0;
}
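/* Illustrative sketch (not part of this file): flip one random bit in the Guard
 * field of one block and confirm that verification reports a guard error at the
 * injected block. This mirrors typical test usage; the exact assertions are an
 * assumption, not quoted from the unit tests.
 *
 *	uint32_t inject_offset = 0;
 *	struct spdk_dif_error err_blk;
 *	int rc;
 *
 *	rc = spdk_dif_inject_error(&iov, 1, 4, &ctx, SPDK_DIF_GUARD_ERROR, &inject_offset);
 *	assert(rc == 0);
 *
 *	rc = spdk_dif_verify(&iov, 1, 4, &ctx, &err_blk);
 *	assert(rc != 0 && err_blk.err_type == SPDK_DIF_GUARD_ERROR);
 *	assert(err_blk.err_offset == inject_offset);
 */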
static void
dix_generate(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
	     uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks = 0;
	uint8_t *data_buf, *md_buf;
	uint64_t guard;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(data_sgl, &data_buf, NULL);
		_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(ctx->guard_seed, data_buf, ctx->block_size,
						    ctx->dif_pi_format);
			guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval,
						    ctx->dif_pi_format);
		}

		_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);

		_dif_sgl_advance(data_sgl, ctx->block_size);
		_dif_sgl_advance(md_sgl, ctx->md_size);
		offset_blocks++;
	}
}

static void
_dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		    uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_in_block, data_buf_len;
	uint8_t *data_buf, *md_buf;
	uint64_t guard = 0;

	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < ctx->block_size) {
		_dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len);
		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(guard, data_buf, data_buf_len,
						    ctx->dif_pi_format);
		}

		_dif_sgl_advance(data_sgl, data_buf_len);
		offset_in_block += data_buf_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval,
					    ctx->dif_pi_format);
	}

	_dif_sgl_advance(md_sgl, ctx->md_size);

	_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);
}

static void
dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		   uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	uint32_t offset_blocks;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		_dix_generate_split(data_sgl, md_sgl, offset_blocks, ctx);
	}
}

int
spdk_dix_generate(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
{
	struct _dif_sgl data_sgl, md_sgl;

	_dif_sgl_init(&data_sgl, iovs, iovcnt);
	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) {
		dix_generate(&data_sgl, &md_sgl, num_blocks, ctx);
	} else {
		dix_generate_split(&data_sgl, &md_sgl, num_blocks, ctx);
	}

	return 0;
}
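/* Illustrative sketch (not part of this file): DIX-style protection with the
 * metadata kept in a separate buffer. dix_ctx is a hypothetical context assumed
 * to have been initialized with md_interleave = false (e.g. block_size = 512,
 * md_size = 8 for the 16-bit guard PI format).
 *
 *	uint8_t data[4 * 512], md[4 * 8];
 *	struct iovec data_iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct iovec md_iov = { .iov_base = md, .iov_len = sizeof(md) };
 *	struct spdk_dif_error err_blk;
 *	int rc;
 *
 *	rc = spdk_dix_generate(&data_iov, 1, &md_iov, 4, &dix_ctx);
 *	assert(rc == 0);
 *
 *	rc = spdk_dix_verify(&data_iov, 1, &md_iov, 4, &dix_ctx, &err_blk);
 *	assert(rc == 0);
 */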
static int
dix_verify(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
	   uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
	   struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks = 0;
	uint8_t *data_buf, *md_buf;
	uint64_t guard;
	int rc;

	while (offset_blocks < num_blocks) {
		_dif_sgl_get_buf(data_sgl, &data_buf, NULL);
		_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

		guard = 0;
		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(ctx->guard_seed, data_buf, ctx->block_size,
						    ctx->dif_pi_format);
			guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval,
						    ctx->dif_pi_format);
		}

		rc = _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}

		_dif_sgl_advance(data_sgl, ctx->block_size);
		_dif_sgl_advance(md_sgl, ctx->md_size);
		offset_blocks++;
	}

	return 0;
}

static int
_dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		  uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
		  struct spdk_dif_error *err_blk)
{
	uint32_t offset_in_block, data_buf_len;
	uint8_t *data_buf, *md_buf;
	uint64_t guard = 0;

	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->guard_seed;
	}
	offset_in_block = 0;

	while (offset_in_block < ctx->block_size) {
		_dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len);
		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);

		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
			guard = _dif_generate_guard(guard, data_buf, data_buf_len,
						    ctx->dif_pi_format);
		}

		_dif_sgl_advance(data_sgl, data_buf_len);
		offset_in_block += data_buf_len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = _dif_generate_guard(guard, md_buf, ctx->guard_interval,
					    ctx->dif_pi_format);
	}

	_dif_sgl_advance(md_sgl, ctx->md_size);

	return _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
}

static int
dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
		 uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		 struct spdk_dif_error *err_blk)
{
	uint32_t offset_blocks;
	int rc;

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dix_verify_split(data_sgl, md_sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

int
spdk_dix_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		struct spdk_dif_error *err_blk)
{
	struct _dif_sgl data_sgl, md_sgl;

	if (md_iov->iov_base == NULL) {
		SPDK_ERRLOG("Metadata buffer is NULL.\n");
		return -EINVAL;
	}

	_dif_sgl_init(&data_sgl, iovs, iovcnt);
	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) {
		return dix_verify(&data_sgl, &md_sgl, num_blocks, ctx, err_blk);
	} else {
		return dix_verify_split(&data_sgl, &md_sgl, num_blocks, ctx, err_blk);
	}
}
int
spdk_dix_inject_error(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
		      uint32_t inject_flags, uint32_t *inject_offset)
{
	struct _dif_sgl data_sgl, md_sgl;
	int rc;

	_dif_sgl_init(&data_sgl, iovs, iovcnt);
	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
				      ctx->guard_interval + _dif_reftag_offset(ctx->dif_pi_format),
				      _dif_reftag_size(ctx->dif_pi_format),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
				      ctx->guard_interval + _dif_apptag_offset(ctx->dif_pi_format),
				      _dif_apptag_size(),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
				      ctx->guard_interval,
				      _dif_guard_size(ctx->dif_pi_format),
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to Guard.\n");
			return rc;
		}
	}

	if (inject_flags & SPDK_DIF_DATA_ERROR) {
		/* Note: Error injection to data block is expected to be detected
		 * as guard error.
		 */
		rc = dif_inject_error(&data_sgl, ctx->block_size, num_blocks,
				      0,
				      ctx->block_size,
				      inject_offset);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to inject error to data block.\n");
			return rc;
		}
	}

	return 0;
}
static uint32_t
_to_next_boundary(uint32_t offset, uint32_t boundary)
{
	return boundary - (offset % boundary);
}

static uint32_t
_to_size_with_md(uint32_t size, uint32_t data_block_size, uint32_t block_size)
{
	return (size / data_block_size) * block_size + (size % data_block_size);
}

int
spdk_dif_set_md_interleave_iovs(struct iovec *iovs, int iovcnt,
				struct iovec *buf_iovs, int buf_iovcnt,
				uint32_t data_offset, uint32_t data_len,
				uint32_t *_mapped_len,
				const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_len, buf_offset, len;
	struct _dif_sgl dif_sgl;
	struct _dif_sgl buf_sgl;

	if (iovs == NULL || iovcnt == 0 || buf_iovs == NULL || buf_iovcnt == 0) {
		return -EINVAL;
	}

	data_block_size = ctx->block_size - ctx->md_size;

	data_unalign = ctx->data_offset % data_block_size;

	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
				   ctx->block_size);
	buf_len -= data_unalign;

	_dif_sgl_init(&dif_sgl, iovs, iovcnt);
	_dif_sgl_init(&buf_sgl, buf_iovs, buf_iovcnt);

	if (!_dif_sgl_is_valid(&buf_sgl, buf_len)) {
		SPDK_ERRLOG("Buffer overflow will occur.\n");
		return -ERANGE;
	}

	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
	buf_offset -= data_unalign;

	_dif_sgl_advance(&buf_sgl, buf_offset);

	while (data_len != 0) {
		len = spdk_min(data_len, _to_next_boundary(ctx->data_offset + data_offset, data_block_size));
		if (!_dif_sgl_append_split(&dif_sgl, &buf_sgl, len)) {
			break;
		}
		_dif_sgl_advance(&buf_sgl, ctx->md_size);
		data_offset += len;
		data_len -= len;
	}

	if (_mapped_len != NULL) {
		*_mapped_len = dif_sgl.total_size;
	}

	return iovcnt - dif_sgl.iovcnt;
}
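/* Illustrative example (not part of this file): with data_block_size = 512 and
 * block_size = 520, _to_size_with_md(1024, 512, 520) = 2 * 520 + 0 = 1040 and
 * _to_size_with_md(1300, 512, 520) = 2 * 520 + 276 = 1316, i.e. the data size
 * is expanded by one 8-byte metadata field per complete data block that
 * precedes it. spdk_dif_set_md_interleave_iovs() uses this conversion to build
 * an iovec array that exposes only the data regions of an interleaved buffer.
 */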
static int
_dif_sgl_setup_stream(struct _dif_sgl *sgl, uint32_t *_buf_offset, uint32_t *_buf_len,
		      uint32_t data_offset, uint32_t data_len,
		      const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_len, buf_offset;

	data_block_size = ctx->block_size - ctx->md_size;

	data_unalign = ctx->data_offset % data_block_size;

	/* If the last data block is complete, DIF of the data block is
	 * inserted or verified in this turn.
	 */
	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
				   ctx->block_size);
	buf_len -= data_unalign;

	if (!_dif_sgl_is_valid(sgl, buf_len)) {
		return -ERANGE;
	}

	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
	buf_offset -= data_unalign;

	_dif_sgl_advance(sgl, buf_offset);
	buf_len -= buf_offset;

	buf_offset += data_unalign;

	*_buf_offset = buf_offset;
	*_buf_len = buf_len;

	return 0;
}

int
spdk_dif_generate_stream(struct iovec *iovs, int iovcnt,
			 uint32_t data_offset, uint32_t data_len,
			 struct spdk_dif_ctx *ctx)
{
	uint32_t buf_len = 0, buf_offset = 0;
	uint32_t len, offset_in_block, offset_blocks;
	uint64_t guard = 0;
	struct _dif_sgl sgl;
	int rc;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->last_guard;
	}

	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;
		offset_blocks = buf_offset / ctx->block_size;

		guard = _dif_generate_split(&sgl, offset_in_block, len, guard, offset_blocks, ctx);

		buf_len -= len;
		buf_offset += len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		ctx->last_guard = guard;
	}

	return 0;
}

int
spdk_dif_verify_stream(struct iovec *iovs, int iovcnt,
		       uint32_t data_offset, uint32_t data_len,
		       struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk)
{
	uint32_t buf_len = 0, buf_offset = 0;
	uint32_t len, offset_in_block, offset_blocks;
	uint64_t guard = 0;
	struct _dif_sgl sgl;
	int rc = 0;

	if (iovs == NULL || iovcnt == 0) {
		return -EINVAL;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		guard = ctx->last_guard;
	}

	_dif_sgl_init(&sgl, iovs, iovcnt);

	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;
		offset_blocks = buf_offset / ctx->block_size;

		rc = _dif_verify_split(&sgl, offset_in_block, len, &guard, offset_blocks,
				       ctx, err_blk);
		if (rc != 0) {
			goto error;
		}

		buf_len -= len;
		buf_offset += len;
	}

	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
		ctx->last_guard = guard;
	}
error:
	return rc;
}
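/* Illustrative sketch (not part of this file): generate DIF incrementally as
 * data arrives in arbitrary chunks. The context carries the running guard in
 * ctx->last_guard between calls, so the same interleaved buffer can be filled
 * piecewise (here two chunks covering 1024 bytes of data in total).
 *
 *	rc = spdk_dif_generate_stream(&iov, 1, 0, 700, &ctx);
 *	assert(rc == 0);
 *	rc = spdk_dif_generate_stream(&iov, 1, 700, 324, &ctx);
 *	assert(rc == 0);
 */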
	if (rc != 0) {
		return rc;
	}

	while (buf_len != 0) {
		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
		offset_in_block = buf_offset % ctx->block_size;

		crc32c = _dif_update_crc32c_split(&sgl, offset_in_block, len, crc32c, ctx);

		buf_len -= len;
		buf_offset += len;
	}

	*_crc32c = crc32c;

	return 0;
}

void
spdk_dif_get_range_with_md(uint32_t data_offset, uint32_t data_len,
			   uint32_t *_buf_offset, uint32_t *_buf_len,
			   const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size, data_unalign, buf_offset, buf_len;

	if (!ctx->md_interleave) {
		buf_offset = data_offset;
		buf_len = data_len;
	} else {
		data_block_size = ctx->block_size - ctx->md_size;

		data_unalign = data_offset % data_block_size;

		buf_offset = _to_size_with_md(data_offset, data_block_size, ctx->block_size);
		buf_len = _to_size_with_md(data_unalign + data_len, data_block_size, ctx->block_size) -
			  data_unalign;
	}

	if (_buf_offset != NULL) {
		*_buf_offset = buf_offset;
	}

	if (_buf_len != NULL) {
		*_buf_len = buf_len;
	}
}

uint32_t
spdk_dif_get_length_with_md(uint32_t data_len, const struct spdk_dif_ctx *ctx)
{
	uint32_t data_block_size;

	if (!ctx->md_interleave) {
		return data_len;
	} else {
		data_block_size = ctx->block_size - ctx->md_size;

		return _to_size_with_md(data_len, data_block_size, ctx->block_size);
	}
}

static int
_dif_remap_ref_tag(struct _dif_sgl *sgl, uint32_t offset_blocks,
		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint32_t offset, buf_len;
	uint64_t expected = 0, _actual, remapped;
	uint8_t *buf;
	struct _dif_sgl tmp_sgl;
	struct spdk_dif dif;

	/* Fast forward to DIF field. */
	_dif_sgl_advance(sgl, ctx->guard_interval);
	_dif_sgl_copy(&tmp_sgl, sgl);

	/* Copy the split DIF field to the temporary DIF buffer */
	offset = 0;
	while (offset < _dif_size(ctx->dif_pi_format)) {
		_dif_sgl_get_buf(sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset);

		memcpy((uint8_t *)&dif + offset, buf, buf_len);

		_dif_sgl_advance(sgl, buf_len);
		offset += buf_len;
	}

	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* If Type 1 or 2 is used, then all DIF checks are disabled when
		 * the Application Tag is 0xFFFF.
		 */
		if (_dif_apptag_ignore(&dif, ctx->dif_pi_format)) {
			goto end;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* If Type 3 is used, then all DIF checks are disabled when the
		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF
		 * or 0xFFFFFFFFFFFFFFFF depending on the PI format.
		 */
		if (_dif_apptag_ignore(&dif, ctx->dif_pi_format) &&
		    _dif_reftag_ignore(&dif, ctx->dif_pi_format)) {
			goto end;
		}
		break;
	default:
		break;
	}

	/* For type 1 and 2, the Reference Tag is incremented for each
	 * subsequent logical block. For type 3, the Reference Tag
	 * remains the same as the initial Reference Tag.
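	 *
	 * As a hypothetical illustration: with init_ref_tag = 0x100, ref_tag_offset = 0
	 * and offset_blocks = 3, Type 1/2 expects 0x103 in the stored DIF and rewrites
	 * it to remapped_init_ref_tag + 3, while Type 3 rewrites every block to
	 * remapped_init_ref_tag as is.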
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		remapped = ctx->remapped_init_ref_tag;
	}

	/* Verify the stored Reference Tag. */
	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* Compare the DIF Reference Tag field to the passed Reference Tag.
		 * The passed Reference Tag will be the least significant 4 bytes
		 * or 8 bytes (depending on the PI format)
		 * of the LBA when Type 1 is used, and application specific value
		 * if Type 2 is used.
		 */
		if (!_dif_reftag_match(&dif, expected, ctx->dif_pi_format)) {
			_actual = _dif_get_reftag(&dif, ctx->dif_pi_format);
			_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected,
				       _actual, offset_blocks);
			SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu64 "," \
				    " Expected=%lx, Actual=%lx\n",
				    expected, expected, _actual);
			return -1;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* For type 3, the computed Reference Tag remains unchanged.
		 * Hence ignore the Reference Tag field.
		 */
		break;
	default:
		break;
	}

	/* Update the stored Reference Tag to the remapped one. */
	_dif_set_reftag(&dif, remapped, ctx->dif_pi_format);

	offset = 0;
	while (offset < _dif_size(ctx->dif_pi_format)) {
		_dif_sgl_get_buf(&tmp_sgl, &buf, &buf_len);
		buf_len = spdk_min(buf_len, _dif_size(ctx->dif_pi_format) - offset);

		memcpy(buf, (uint8_t *)&dif + offset, buf_len);

		_dif_sgl_advance(&tmp_sgl, buf_len);
		offset += buf_len;
	}

end:
	_dif_sgl_advance(sgl, ctx->block_size - ctx->guard_interval - _dif_size(ctx->dif_pi_format));

	return 0;
}

int
spdk_dif_remap_ref_tag(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	struct _dif_sgl sgl;
	uint32_t offset_blocks;
	int rc;

	_dif_sgl_init(&sgl, iovs, iovcnt);

	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
		SPDK_ERRLOG("Size of iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
		return 0;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dif_remap_ref_tag(&sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
_dix_remap_ref_tag(struct _dif_sgl *md_sgl, uint32_t offset_blocks,
		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
{
	uint64_t expected = 0, _actual, remapped;
	uint8_t *md_buf;
	struct spdk_dif *dif;

	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);

	dif = (struct spdk_dif *)(md_buf + ctx->guard_interval);

	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* If Type 1 or 2 is used, then all DIF checks are disabled when
		 * the Application Tag is 0xFFFF.
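		 * Such a block is treated as unprotected, so its Reference Tag is
		 * neither verified nor remapped below.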
		 */
		if (_dif_apptag_ignore(dif, ctx->dif_pi_format)) {
			goto end;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* If Type 3 is used, then all DIF checks are disabled when the
		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF
		 * or 0xFFFFFFFFFFFFFFFF depending on the PI format.
		 */
		if (_dif_apptag_ignore(dif, ctx->dif_pi_format) &&
		    _dif_reftag_ignore(dif, ctx->dif_pi_format)) {
			goto end;
		}
		break;
	default:
		break;
	}

	/* For type 1 and 2, the Reference Tag is incremented for each
	 * subsequent logical block. For type 3, the Reference Tag
	 * remains the same as the initial Reference Tag.
	 */
	if (ctx->dif_type != SPDK_DIF_TYPE3) {
		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
	} else {
		remapped = ctx->remapped_init_ref_tag;
	}

	/* Verify the stored Reference Tag. */
	switch (ctx->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		/* Compare the DIF Reference Tag field to the passed Reference Tag.
		 * The passed Reference Tag will be the least significant 4 bytes
		 * or 8 bytes (depending on the PI format)
		 * of the LBA when Type 1 is used, and application specific value
		 * if Type 2 is used.
		 */
		if (!_dif_reftag_match(dif, expected, ctx->dif_pi_format)) {
			_actual = _dif_get_reftag(dif, ctx->dif_pi_format);
			_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected,
				       _actual, offset_blocks);
			SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu64 "," \
				    " Expected=%lx, Actual=%lx\n",
				    expected, expected, _actual);
			return -1;
		}
		break;
	case SPDK_DIF_TYPE3:
		/* For type 3, the computed Reference Tag remains unchanged.
		 * Hence ignore the Reference Tag field.
		 */
		break;
	default:
		break;
	}

	/* Update the stored Reference Tag to the remapped one. */
	_dif_set_reftag(dif, remapped, ctx->dif_pi_format);

end:
	_dif_sgl_advance(md_sgl, ctx->md_size);

	return 0;
}

int
spdk_dix_remap_ref_tag(struct iovec *md_iov, uint32_t num_blocks,
		       const struct spdk_dif_ctx *ctx,
		       struct spdk_dif_error *err_blk)
{
	struct _dif_sgl md_sgl;
	uint32_t offset_blocks;
	int rc;

	_dif_sgl_init(&md_sgl, md_iov, 1);

	if (!_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
		SPDK_ERRLOG("Size of metadata iovec array is not valid.\n");
		return -EINVAL;
	}

	if (_dif_is_disabled(ctx->dif_type)) {
		return 0;
	}

	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
		return 0;
	}

	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
		rc = _dix_remap_ref_tag(&md_sgl, offset_blocks, ctx, err_blk);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}
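
/* Illustrative usage sketch (kept as a comment so it is not built into the
 * library): remapping the Reference Tags stored in separated (DIX) metadata
 * after the LBA range of an I/O has been translated. The metadata buffer,
 * block count and new starting Reference Tag are assumptions for the example;
 * ctx is assumed to have been initialized earlier with spdk_dif_ctx_init()
 * and to have SPDK_DIF_FLAGS_REFTAG_CHECK enabled.
 *
 *	struct spdk_dif_error err_blk;
 *	struct iovec md_iov;
 *	int rc;
 *
 *	md_iov.iov_base = md_buf;
 *	md_iov.iov_len = num_blocks * ctx.md_size;
 *
 *	spdk_dif_ctx_set_remapped_init_ref_tag(&ctx, new_start_lba);
 *
 *	rc = spdk_dix_remap_ref_tag(&md_iov, num_blocks, &ctx, &err_blk);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("Ref Tag remap failed at block %" PRIu32 "\n",
 *			    err_blk.err_offset);
 *	}
 */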