xref: /spdk/lib/util/dif.c (revision 94a84ae98590bea46939eb1dcd7a9876bd393b54)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/dif.h"
35 #include "spdk/crc16.h"
36 #include "spdk/crc32.h"
37 #include "spdk/endian.h"
38 #include "spdk/log.h"
39 #include "spdk/util.h"
40 
41 /* Context for iterating over or creating an iovec array.
42  * Each SGL is used either for iteration or for creation, not both.
43  */
44 struct _dif_sgl {
45 	/* Current iovec in the iteration or creation */
46 	struct iovec *iov;
47 
48 	/* Remaining count of iovecs in the iteration or creation. */
49 	int iovcnt;
50 
51 	/* Current offset in the iovec */
52 	uint32_t iov_offset;
53 
54 	/* Size of the created iovec array in bytes */
55 	uint32_t total_size;
56 };
57 
58 static inline void
59 _dif_sgl_init(struct _dif_sgl *s, struct iovec *iovs, int iovcnt)
60 {
61 	s->iov = iovs;
62 	s->iovcnt = iovcnt;
63 	s->iov_offset = 0;
64 	s->total_size = 0;
65 }
66 
67 static void
68 _dif_sgl_advance(struct _dif_sgl *s, uint32_t step)
69 {
70 	s->iov_offset += step;
71 	while (s->iovcnt != 0) {
72 		if (s->iov_offset < s->iov->iov_len) {
73 			break;
74 		}
75 
76 		s->iov_offset -= s->iov->iov_len;
77 		s->iov++;
78 		s->iovcnt--;
79 	}
80 }
81 
82 static inline void
83 _dif_sgl_get_buf(struct _dif_sgl *s, void **_buf, uint32_t *_buf_len)
84 {
85 	if (_buf != NULL) {
86 		*_buf = s->iov->iov_base + s->iov_offset;
87 	}
88 	if (_buf_len != NULL) {
89 		*_buf_len = s->iov->iov_len - s->iov_offset;
90 	}
91 }
92 
93 static inline bool
94 _dif_sgl_append(struct _dif_sgl *s, uint8_t *data, uint32_t data_len)
95 {
96 	assert(s->iovcnt > 0);
97 	s->iov->iov_base = data;
98 	s->iov->iov_len = data_len;
99 	s->total_size += data_len;
100 	s->iov++;
101 	s->iovcnt--;
102 
103 	if (s->iovcnt > 0) {
104 		return true;
105 	} else {
106 		return false;
107 	}
108 }
109 
110 static inline bool
111 _dif_sgl_append_split(struct _dif_sgl *dst, struct _dif_sgl *src, uint32_t data_len)
112 {
113 	uint8_t *buf;
114 	uint32_t buf_len;
115 
116 	while (data_len != 0) {
117 		_dif_sgl_get_buf(src, (void *)&buf, &buf_len);
118 		buf_len = spdk_min(buf_len, data_len);
119 
120 		if (!_dif_sgl_append(dst, buf, buf_len)) {
121 			return false;
122 		}
123 
124 		_dif_sgl_advance(src, buf_len);
125 		data_len -= buf_len;
126 	}
127 
128 	return true;
129 }
130 
131 /* This function must be used before starting iteration. */
132 static bool
133 _dif_sgl_is_bytes_multiple(struct _dif_sgl *s, uint32_t bytes)
134 {
135 	int i;
136 
137 	for (i = 0; i < s->iovcnt; i++) {
138 		if (s->iov[i].iov_len % bytes) {
139 			return false;
140 		}
141 	}
142 
143 	return true;
144 }
145 
146 /* This function must be used before starting iteration. */
147 static bool
148 _dif_sgl_is_valid(struct _dif_sgl *s, uint32_t bytes)
149 {
150 	uint64_t total = 0;
151 	int i;
152 
153 	for (i = 0; i < s->iovcnt; i++) {
154 		total += s->iov[i].iov_len;
155 	}
156 
157 	return total >= bytes;
158 }
159 
160 static void
161 _dif_sgl_copy(struct _dif_sgl *to, struct _dif_sgl *from)
162 {
163 	memcpy(to, from, sizeof(struct _dif_sgl));
164 }
165 
166 static bool
167 _dif_type_is_valid(enum spdk_dif_type dif_type, uint32_t dif_flags)
168 {
169 	switch (dif_type) {
170 	case SPDK_DIF_TYPE1:
171 	case SPDK_DIF_TYPE2:
172 	case SPDK_DIF_DISABLE:
173 		break;
174 	case SPDK_DIF_TYPE3:
175 		if (dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
176 			SPDK_ERRLOG("Reference Tag should not be checked for Type 3\n");
177 			return false;
178 		}
179 		break;
180 	default:
181 		SPDK_ERRLOG("Unknown DIF Type: %d\n", dif_type);
182 		return false;
183 	}
184 
185 	return true;
186 }
187 
188 static bool
189 _dif_is_disabled(enum spdk_dif_type dif_type)
190 {
191 	if (dif_type == SPDK_DIF_DISABLE) {
192 		return true;
193 	} else {
194 		return false;
195 	}
196 }
197 
198 
199 static uint32_t
200 _get_guard_interval(uint32_t block_size, uint32_t md_size, bool dif_loc, bool md_interleave)
201 {
202 	if (!dif_loc) {
203 		/* For metadata formats with more than 8 bytes, if the DIF is
204 		 * contained in the last 8 bytes of metadata, then the CRC
205 		 * covers all metadata up to but excluding these last 8 bytes.
206 		 */
207 		if (md_interleave) {
208 			return block_size - sizeof(struct spdk_dif);
209 		} else {
210 			return md_size - sizeof(struct spdk_dif);
211 		}
212 	} else {
213 		/* For metadata formats with more than 8 bytes, if the DIF is
214 		 * contained in the first 8 bytes of metadata, then the CRC
215 		 * does not cover any metadata.
216 		 */
217 		if (md_interleave) {
218 			return block_size - md_size;
219 		} else {
220 			return 0;
221 		}
222 	}
223 }
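
/* Worked example (illustrative note, not in the original source): for an
 * interleaved 512 + 8 format (block_size = 520, md_size = 8) and
 * sizeof(struct spdk_dif) == 8:
 *   - dif_loc == false (DIF in the last 8 bytes of metadata):
 *       guard_interval = 520 - 8 = 512
 *   - dif_loc == true (DIF in the first 8 bytes of metadata):
 *       guard_interval = 520 - 8 = 512
 * The two cases differ only when md_size > 8, e.g. with md_size = 16 and
 * block_size = 528 the guard interval is 520 versus 512.
 */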
224 
225 int
226 spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
227 		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
228 		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
229 		  uint32_t data_offset, uint16_t guard_seed)
230 {
231 	uint32_t data_block_size;
232 
233 	if (md_size < sizeof(struct spdk_dif)) {
234 		SPDK_ERRLOG("Metadata size is smaller than DIF size.\n");
235 		return -EINVAL;
236 	}
237 
238 	if (md_interleave) {
239 		if (block_size < md_size) {
240 			SPDK_ERRLOG("Block size is smaller than metadata size.\n");
241 			return -EINVAL;
242 		}
243 		data_block_size = block_size - md_size;
244 	} else {
245 		if (block_size == 0 || (block_size % 512) != 0) {
246 			SPDK_ERRLOG("Block size must be a non-zero multiple of 512 bytes\n");
247 			return -EINVAL;
248 		}
249 		data_block_size = block_size;
250 	}
251 
252 	if (!_dif_type_is_valid(dif_type, dif_flags)) {
253 		SPDK_ERRLOG("DIF type is invalid.\n");
254 		return -EINVAL;
255 	}
256 
257 	ctx->block_size = block_size;
258 	ctx->md_size = md_size;
259 	ctx->md_interleave = md_interleave;
260 	ctx->guard_interval = _get_guard_interval(block_size, md_size, dif_loc, md_interleave);
261 	ctx->dif_type = dif_type;
262 	ctx->dif_flags = dif_flags;
263 	ctx->init_ref_tag = init_ref_tag;
264 	ctx->apptag_mask = apptag_mask;
265 	ctx->app_tag = app_tag;
266 	ctx->data_offset = data_offset;
267 	ctx->ref_tag_offset = data_offset / data_block_size;
268 	ctx->last_guard = guard_seed;
269 	ctx->guard_seed = guard_seed;
270 	ctx->remapped_init_ref_tag = 0;
271 
272 	return 0;
273 }
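
/* Usage sketch (illustrative only, not part of the original file): initialize
 * a Type 1 DIF context for 512-byte data blocks with 8 bytes of interleaved
 * metadata, with Guard and Reference Tag checking enabled and an initial
 * Reference Tag of 0x100. The flag and tag values are arbitrary examples.
 *
 *	struct spdk_dif_ctx ctx;
 *	int rc;
 *
 *	rc = spdk_dif_ctx_init(&ctx, 512 + 8, 8, true, true, SPDK_DIF_TYPE1,
 *			       SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
 *			       0x100, 0xFFFF, 0, 0, 0);
 *	if (rc != 0) {
 *		return rc;
 *	}
 */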
274 
275 void
276 spdk_dif_ctx_set_data_offset(struct spdk_dif_ctx *ctx, uint32_t data_offset)
277 {
278 	uint32_t data_block_size;
279 
280 	if (ctx->md_interleave) {
281 		data_block_size = ctx->block_size - ctx->md_size;
282 	} else {
283 		data_block_size = ctx->block_size;
284 	}
285 
286 	ctx->data_offset = data_offset;
287 	ctx->ref_tag_offset = data_offset / data_block_size;
288 }
289 
290 void
291 spdk_dif_ctx_set_remapped_init_ref_tag(struct spdk_dif_ctx *ctx,
292 				       uint32_t remapped_init_ref_tag)
293 {
294 	ctx->remapped_init_ref_tag = remapped_init_ref_tag;
295 }
296 
297 static void
298 _dif_generate(void *_dif, uint16_t guard, uint32_t offset_blocks,
299 	      const struct spdk_dif_ctx *ctx)
300 {
301 	struct spdk_dif *dif = _dif;
302 	uint32_t ref_tag;
303 
304 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
305 		to_be16(&dif->guard, guard);
306 	}
307 
308 	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
309 		to_be16(&dif->app_tag, ctx->app_tag);
310 	}
311 
312 	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
313 		/* For type 1 and 2, the reference tag is incremented for each
314 		 * subsequent logical block. For type 3, the reference tag
315 		 * remains the same as the initial reference tag.
316 		 */
317 		if (ctx->dif_type != SPDK_DIF_TYPE3) {
318 			ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
319 		} else {
320 			ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset;
321 		}
322 
323 		to_be32(&dif->ref_tag, ref_tag);
324 	}
325 }
326 
327 static void
328 dif_generate(struct _dif_sgl *sgl, uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
329 {
330 	uint32_t offset_blocks = 0;
331 	void *buf;
332 	uint16_t guard = 0;
333 
334 	while (offset_blocks < num_blocks) {
335 		_dif_sgl_get_buf(sgl, &buf, NULL);
336 
337 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
338 			guard = spdk_crc16_t10dif(ctx->guard_seed, buf, ctx->guard_interval);
339 		}
340 
341 		_dif_generate(buf + ctx->guard_interval, guard, offset_blocks, ctx);
342 
343 		_dif_sgl_advance(sgl, ctx->block_size);
344 		offset_blocks++;
345 	}
346 }
347 
348 static uint16_t
349 _dif_generate_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
350 		    uint16_t guard, uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
351 {
352 	uint32_t offset_in_dif, buf_len;
353 	void *buf;
354 	struct spdk_dif dif = {};
355 
356 	assert(offset_in_block < ctx->guard_interval);
357 	assert(offset_in_block + data_len < ctx->guard_interval ||
358 	       offset_in_block + data_len == ctx->block_size);
359 
360 	/* Compute CRC over split logical block data. */
361 	while (data_len != 0 && offset_in_block < ctx->guard_interval) {
362 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
363 		buf_len = spdk_min(buf_len, data_len);
364 		buf_len = spdk_min(buf_len, ctx->guard_interval - offset_in_block);
365 
366 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
367 			guard = spdk_crc16_t10dif(guard, buf, buf_len);
368 		}
369 
370 		_dif_sgl_advance(sgl, buf_len);
371 		offset_in_block += buf_len;
372 		data_len -= buf_len;
373 	}
374 
375 	if (offset_in_block < ctx->guard_interval) {
376 		return guard;
377 	}
378 
379 	/* If the data of a whole logical block has been parsed, generate
380 	 * the DIF and save it to the temporary DIF area.
381 	 */
382 	_dif_generate(&dif, guard, offset_blocks, ctx);
383 
384 	/* Copy generated DIF field to the split DIF field, and then
385 	 * skip metadata field after DIF field (if any).
386 	 */
387 	while (offset_in_block < ctx->block_size) {
388 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
389 
390 		if (offset_in_block < ctx->guard_interval + sizeof(struct spdk_dif)) {
391 			offset_in_dif = offset_in_block - ctx->guard_interval;
392 			buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset_in_dif);
393 
394 			memcpy(buf, ((uint8_t *)&dif) + offset_in_dif, buf_len);
395 		} else {
396 			buf_len = spdk_min(buf_len, ctx->block_size - offset_in_block);
397 		}
398 
399 		_dif_sgl_advance(sgl, buf_len);
400 		offset_in_block += buf_len;
401 	}
402 
403 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
404 		guard = ctx->guard_seed;
405 	}
406 
407 	return guard;
408 }
409 
410 static void
411 dif_generate_split(struct _dif_sgl *sgl, uint32_t num_blocks,
412 		   const struct spdk_dif_ctx *ctx)
413 {
414 	uint32_t offset_blocks;
415 	uint16_t guard = 0;
416 
417 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
418 		guard = ctx->guard_seed;
419 	}
420 
421 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
422 		_dif_generate_split(sgl, 0, ctx->block_size, guard, offset_blocks, ctx);
423 	}
424 }
425 
426 int
427 spdk_dif_generate(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
428 		  const struct spdk_dif_ctx *ctx)
429 {
430 	struct _dif_sgl sgl;
431 
432 	_dif_sgl_init(&sgl, iovs, iovcnt);
433 
434 	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
435 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
436 		return -EINVAL;
437 	}
438 
439 	if (_dif_is_disabled(ctx->dif_type)) {
440 		return 0;
441 	}
442 
443 	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
444 		dif_generate(&sgl, num_blocks, ctx);
445 	} else {
446 		dif_generate_split(&sgl, num_blocks, ctx);
447 	}
448 
449 	return 0;
450 }
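
/* Usage sketch (illustrative only): generate DIF for a buffer of extended
 * LBAs described by a single iovec. "buf", "num_blocks" and "ctx" are assumed
 * to exist; ctx must have been initialized for an interleaved format.
 *
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len = num_blocks * ctx.block_size,
 *	};
 *
 *	rc = spdk_dif_generate(&iov, 1, num_blocks, &ctx);
 */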
451 
452 static void
453 _dif_error_set(struct spdk_dif_error *err_blk, uint8_t err_type,
454 	       uint32_t expected, uint32_t actual, uint32_t err_offset)
455 {
456 	if (err_blk) {
457 		err_blk->err_type = err_type;
458 		err_blk->expected = expected;
459 		err_blk->actual = actual;
460 		err_blk->err_offset = err_offset;
461 	}
462 }
463 
464 static int
465 _dif_verify(void *_dif, uint16_t guard, uint32_t offset_blocks,
466 	    const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
467 {
468 	struct spdk_dif *dif = _dif;
469 	uint16_t _guard;
470 	uint16_t _app_tag;
471 	uint32_t ref_tag, _ref_tag;
472 
473 	switch (ctx->dif_type) {
474 	case SPDK_DIF_TYPE1:
475 	case SPDK_DIF_TYPE2:
476 		/* If Type 1 or 2 is used, then all DIF checks are disabled when
477 		 * the Application Tag is 0xFFFF.
478 		 */
479 		if (dif->app_tag == 0xFFFF) {
480 			return 0;
481 		}
482 		break;
483 	case SPDK_DIF_TYPE3:
484 		/* If Type 3 is used, then all DIF checks are disabled when the
485 		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF.
486 		 */
487 		if (dif->app_tag == 0xFFFF && dif->ref_tag == 0xFFFFFFFF) {
488 			return 0;
489 		}
490 		break;
491 	default:
492 		break;
493 	}
494 
495 	/* For type 1 and 2, the reference tag is incremented for each
496 	 * subsequent logical block. For type 3, the reference tag
497 	 * remains the same as the initial reference tag.
498 	 */
499 	if (ctx->dif_type != SPDK_DIF_TYPE3) {
500 		ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
501 	} else {
502 		ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset;
503 	}
504 
505 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
506 		/* Compare the DIF Guard field to the CRC computed over the logical
507 		 * block data.
508 		 */
509 		_guard = from_be16(&dif->guard);
510 		if (_guard != guard) {
511 			_dif_error_set(err_blk, SPDK_DIF_GUARD_ERROR, _guard, guard,
512 				       offset_blocks);
513 			SPDK_ERRLOG("Failed to compare Guard: LBA=%" PRIu32 "," \
514 				    "  Expected=%x, Actual=%x\n",
515 				    ref_tag, _guard, guard);
516 			return -1;
517 		}
518 	}
519 
520 	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
521 		/* Compare unmasked bits in the DIF Application Tag field to the
522 		 * passed Application Tag.
523 		 */
524 		_app_tag = from_be16(&dif->app_tag);
525 		if ((_app_tag & ctx->apptag_mask) != ctx->app_tag) {
526 			_dif_error_set(err_blk, SPDK_DIF_APPTAG_ERROR, ctx->app_tag,
527 				       (_app_tag & ctx->apptag_mask), offset_blocks);
528 			SPDK_ERRLOG("Failed to compare App Tag: LBA=%" PRIu32 "," \
529 				    "  Expected=%x, Actual=%x\n",
530 				    ref_tag, ctx->app_tag, (_app_tag & ctx->apptag_mask));
531 			return -1;
532 		}
533 	}
534 
535 	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
536 		switch (ctx->dif_type) {
537 		case SPDK_DIF_TYPE1:
538 		case SPDK_DIF_TYPE2:
539 			/* Compare the DIF Reference Tag field to the passed Reference Tag.
540 			 * The passed Reference Tag will be the least significant 4 bytes
541 			 * of the LBA when Type 1 is used, and an application-specific value
542 			 * if Type 2 is used.
543 			 */
544 			_ref_tag = from_be32(&dif->ref_tag);
545 			if (_ref_tag != ref_tag) {
546 				_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, ref_tag,
547 					       _ref_tag, offset_blocks);
548 				SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu32 "," \
549 					    " Expected=%x, Actual=%x\n",
550 					    ref_tag, ref_tag, _ref_tag);
551 				return -1;
552 			}
553 			break;
554 		case SPDK_DIF_TYPE3:
555 			/* For Type 3, computed Reference Tag remains unchanged.
556 			/* For Type 3, the computed Reference Tag remains unchanged.
557 			 */
558 			break;
559 		default:
560 			break;
561 		}
562 	}
563 
564 	return 0;
565 }
566 
567 static int
568 dif_verify(struct _dif_sgl *sgl, uint32_t num_blocks,
569 	   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
570 {
571 	uint32_t offset_blocks = 0;
572 	int rc;
573 	void *buf;
574 	uint16_t guard = 0;
575 
576 	while (offset_blocks < num_blocks) {
577 		_dif_sgl_get_buf(sgl, &buf, NULL);
578 
579 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
580 			guard = spdk_crc16_t10dif(ctx->guard_seed, buf, ctx->guard_interval);
581 		}
582 
583 		rc = _dif_verify(buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
584 		if (rc != 0) {
585 			return rc;
586 		}
587 
588 		_dif_sgl_advance(sgl, ctx->block_size);
589 		offset_blocks++;
590 	}
591 
592 	return 0;
593 }
594 
595 static int
596 _dif_verify_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
597 		  uint16_t *_guard, uint32_t offset_blocks,
598 		  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
599 {
600 	uint32_t offset_in_dif, buf_len;
601 	void *buf;
602 	uint16_t guard;
603 	struct spdk_dif dif = {};
604 	int rc;
605 
606 	assert(_guard != NULL);
607 	assert(offset_in_block < ctx->guard_interval);
608 	assert(offset_in_block + data_len < ctx->guard_interval ||
609 	       offset_in_block + data_len == ctx->block_size);
610 
611 	guard = *_guard;
612 
613 	/* Compute CRC over split logical block data. */
614 	while (data_len != 0 && offset_in_block < ctx->guard_interval) {
615 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
616 		buf_len = spdk_min(buf_len, data_len);
617 		buf_len = spdk_min(buf_len, ctx->guard_interval - offset_in_block);
618 
619 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
620 			guard = spdk_crc16_t10dif(guard, buf, buf_len);
621 		}
622 
623 		_dif_sgl_advance(sgl, buf_len);
624 		offset_in_block += buf_len;
625 		data_len -= buf_len;
626 	}
627 
628 	if (offset_in_block < ctx->guard_interval) {
629 		*_guard = guard;
630 		return 0;
631 	}
632 
633 	/* Copy the split DIF field to the temporary DIF buffer, and then
634 	 * skip metadata field after DIF field (if any). */
635 	while (offset_in_block < ctx->block_size) {
636 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
637 
638 		if (offset_in_block < ctx->guard_interval + sizeof(struct spdk_dif)) {
639 			offset_in_dif = offset_in_block - ctx->guard_interval;
640 			buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset_in_dif);
641 
642 			memcpy((uint8_t *)&dif + offset_in_dif, buf, buf_len);
643 		} else {
644 			buf_len = spdk_min(buf_len, ctx->block_size - offset_in_block);
645 		}
646 		_dif_sgl_advance(sgl, buf_len);
647 		offset_in_block += buf_len;
648 	}
649 
650 	rc = _dif_verify(&dif, guard, offset_blocks, ctx, err_blk);
651 	if (rc != 0) {
652 		return rc;
653 	}
654 
655 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
656 		guard = ctx->guard_seed;
657 	}
658 
659 	*_guard = guard;
660 	return 0;
661 }
662 
663 static int
664 dif_verify_split(struct _dif_sgl *sgl, uint32_t num_blocks,
665 		 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
666 {
667 	uint32_t offset_blocks;
668 	uint16_t guard = 0;
669 	int rc;
670 
671 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
672 		guard = ctx->guard_seed;
673 	}
674 
675 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
676 		rc = _dif_verify_split(sgl, 0, ctx->block_size, &guard, offset_blocks,
677 				       ctx, err_blk);
678 		if (rc != 0) {
679 			return rc;
680 		}
681 	}
682 
683 	return 0;
684 }
685 
686 int
687 spdk_dif_verify(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
688 		const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
689 {
690 	struct _dif_sgl sgl;
691 
692 	_dif_sgl_init(&sgl, iovs, iovcnt);
693 
694 	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
695 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
696 		return -EINVAL;
697 	}
698 
699 	if (_dif_is_disabled(ctx->dif_type)) {
700 		return 0;
701 	}
702 
703 	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
704 		return dif_verify(&sgl, num_blocks, ctx, err_blk);
705 	} else {
706 		return dif_verify_split(&sgl, num_blocks, ctx, err_blk);
707 	}
708 }
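
/* Usage sketch (illustrative only): verify the buffer generated above and
 * report the first mismatch through err_blk. "iov", "num_blocks" and "ctx"
 * are assumed to be the same as in the generation example.
 *
 *	struct spdk_dif_error err_blk;
 *
 *	rc = spdk_dif_verify(&iov, 1, num_blocks, &ctx, &err_blk);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("DIF error type %u at block %u\n",
 *			    err_blk.err_type, err_blk.err_offset);
 *	}
 */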
709 
710 static uint32_t
711 dif_update_crc32c(struct _dif_sgl *sgl, uint32_t num_blocks,
712 		  uint32_t crc32c,  const struct spdk_dif_ctx *ctx)
713 {
714 	uint32_t offset_blocks;
715 	void *buf;
716 
717 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
718 		_dif_sgl_get_buf(sgl, &buf, NULL);
719 
720 		crc32c = spdk_crc32c_update(buf, ctx->block_size - ctx->md_size, crc32c);
721 
722 		_dif_sgl_advance(sgl, ctx->block_size);
723 	}
724 
725 	return crc32c;
726 }
727 
728 static uint32_t
729 _dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
730 			 uint32_t crc32c, const struct spdk_dif_ctx *ctx)
731 {
732 	uint32_t data_block_size, buf_len;
733 	void *buf;
734 
735 	data_block_size = ctx->block_size - ctx->md_size;
736 
737 	assert(offset_in_block + data_len <= ctx->block_size);
738 
739 	while (data_len != 0) {
740 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
741 		buf_len = spdk_min(buf_len, data_len);
742 
743 		if (offset_in_block < data_block_size) {
744 			buf_len = spdk_min(buf_len, data_block_size - offset_in_block);
745 			crc32c = spdk_crc32c_update(buf, buf_len, crc32c);
746 		}
747 
748 		_dif_sgl_advance(sgl, buf_len);
749 		offset_in_block += buf_len;
750 		data_len -= buf_len;
751 	}
752 
753 	return crc32c;
754 }
755 
756 static uint32_t
757 dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t num_blocks,
758 			uint32_t crc32c, const struct spdk_dif_ctx *ctx)
759 {
760 	uint32_t offset_blocks;
761 
762 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
763 		crc32c = _dif_update_crc32c_split(sgl, 0, ctx->block_size, crc32c, ctx);
764 	}
765 
766 	return crc32c;
767 }
768 
769 int
770 spdk_dif_update_crc32c(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
771 		       uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
772 {
773 	struct _dif_sgl sgl;
774 
775 	if (_crc32c == NULL) {
776 		return -EINVAL;
777 	}
778 
779 	_dif_sgl_init(&sgl, iovs, iovcnt);
780 
781 	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
782 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
783 		return -EINVAL;
784 	}
785 
786 	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
787 		*_crc32c = dif_update_crc32c(&sgl, num_blocks, *_crc32c, ctx);
788 	} else {
789 		*_crc32c = dif_update_crc32c_split(&sgl, num_blocks, *_crc32c, ctx);
790 	}
791 
792 	return 0;
793 }
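
/* Usage sketch (illustrative only): accumulate a CRC-32C over only the data
 * portions of a DIF-interleaved buffer, skipping the per-block metadata. The
 * initial seed (here 0) is chosen by the caller.
 *
 *	uint32_t crc32c = 0;
 *
 *	rc = spdk_dif_update_crc32c(&iov, 1, num_blocks, &crc32c, &ctx);
 */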
794 
795 static void
796 dif_generate_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
797 		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
798 {
799 	uint32_t offset_blocks = 0, data_block_size;
800 	void *src, *dst;
801 	uint16_t guard;
802 
803 	data_block_size = ctx->block_size - ctx->md_size;
804 
805 	while (offset_blocks < num_blocks) {
806 		_dif_sgl_get_buf(src_sgl, &src, NULL);
807 		_dif_sgl_get_buf(dst_sgl, &dst, NULL);
808 
809 		guard = 0;
810 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
811 			guard = spdk_crc16_t10dif_copy(ctx->guard_seed, dst, src, data_block_size);
812 			guard = spdk_crc16_t10dif(guard, dst + data_block_size,
813 						  ctx->guard_interval - data_block_size);
814 		} else {
815 			memcpy(dst, src, data_block_size);
816 		}
817 
818 		_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);
819 
820 		_dif_sgl_advance(src_sgl, data_block_size);
821 		_dif_sgl_advance(dst_sgl, ctx->block_size);
822 		offset_blocks++;
823 	}
824 }
825 
826 static void
827 _dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
828 			 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
829 {
830 	uint32_t offset_in_block, src_len, data_block_size;
831 	uint16_t guard = 0;
832 	void *src, *dst;
833 
834 	_dif_sgl_get_buf(dst_sgl, &dst, NULL);
835 
836 	data_block_size = ctx->block_size - ctx->md_size;
837 
838 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
839 		guard = ctx->guard_seed;
840 	}
841 	offset_in_block = 0;
842 
843 	while (offset_in_block < data_block_size) {
844 		/* Compute CRC over split logical block data and copy
845 		 * data to bounce buffer.
846 		 */
847 		_dif_sgl_get_buf(src_sgl, &src, &src_len);
848 		src_len = spdk_min(src_len, data_block_size - offset_in_block);
849 
850 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
851 			guard = spdk_crc16_t10dif_copy(guard, dst + offset_in_block,
852 						       src, src_len);
853 		} else {
854 			memcpy(dst + offset_in_block, src, src_len);
855 		}
856 
857 		_dif_sgl_advance(src_sgl, src_len);
858 		offset_in_block += src_len;
859 	}
860 
861 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
862 		guard = spdk_crc16_t10dif(guard, dst + data_block_size,
863 					  ctx->guard_interval - data_block_size);
864 	}
865 
866 	_dif_sgl_advance(dst_sgl, ctx->block_size);
867 
868 	_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);
869 }
870 
871 static void
872 dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
873 			uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
874 {
875 	uint32_t offset_blocks;
876 
877 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
878 		_dif_generate_copy_split(src_sgl, dst_sgl, offset_blocks, ctx);
879 	}
880 }
881 
882 int
883 spdk_dif_generate_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
884 		       uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
885 {
886 	struct _dif_sgl src_sgl, dst_sgl;
887 	uint32_t data_block_size;
888 
889 	_dif_sgl_init(&src_sgl, iovs, iovcnt);
890 	_dif_sgl_init(&dst_sgl, bounce_iov, 1);
891 
892 	data_block_size = ctx->block_size - ctx->md_size;
893 
894 	if (!_dif_sgl_is_valid(&src_sgl, data_block_size * num_blocks) ||
895 	    !_dif_sgl_is_valid(&dst_sgl, ctx->block_size * num_blocks)) {
896 		SPDK_ERRLOG("Sizes of iovec arrays are not valid.\n");
897 		return -EINVAL;
898 	}
899 
900 	if (_dif_is_disabled(ctx->dif_type)) {
901 		return 0;
902 	}
903 
904 	if (_dif_sgl_is_bytes_multiple(&src_sgl, data_block_size)) {
905 		dif_generate_copy(&src_sgl, &dst_sgl, num_blocks, ctx);
906 	} else {
907 		dif_generate_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx);
908 	}
909 
910 	return 0;
911 }
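
/* Usage sketch (illustrative only): copy data from application iovecs into a
 * single bounce buffer while inserting DIF. The source iovecs carry only data
 * (num_blocks * (block_size - md_size) bytes); the bounce buffer must hold
 * num_blocks * block_size bytes. "bounce_buf", "iovs", "iovcnt", "num_blocks"
 * and "ctx" are assumed to exist.
 *
 *	struct iovec bounce_iov = {
 *		.iov_base = bounce_buf,
 *		.iov_len = num_blocks * ctx.block_size,
 *	};
 *
 *	rc = spdk_dif_generate_copy(iovs, iovcnt, &bounce_iov, num_blocks, &ctx);
 */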
912 
913 static int
914 dif_verify_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
915 		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
916 		struct spdk_dif_error *err_blk)
917 {
918 	uint32_t offset_blocks = 0, data_block_size;
919 	void *src, *dst;
920 	int rc;
921 	uint16_t guard;
922 
923 	data_block_size = ctx->block_size - ctx->md_size;
924 
925 	while (offset_blocks < num_blocks) {
926 		_dif_sgl_get_buf(src_sgl, &src, NULL);
927 		_dif_sgl_get_buf(dst_sgl, &dst, NULL);
928 
929 		guard = 0;
930 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
931 			guard = spdk_crc16_t10dif_copy(ctx->guard_seed, dst, src, data_block_size);
932 			guard = spdk_crc16_t10dif(guard, src + data_block_size,
933 						  ctx->guard_interval - data_block_size);
934 		} else {
935 			memcpy(dst, src, data_block_size);
936 		}
937 
938 		rc = _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
939 		if (rc != 0) {
940 			return rc;
941 		}
942 
943 		_dif_sgl_advance(src_sgl, ctx->block_size);
944 		_dif_sgl_advance(dst_sgl, data_block_size);
945 		offset_blocks++;
946 	}
947 
948 	return 0;
949 }
950 
951 static int
952 _dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
953 		       uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
954 		       struct spdk_dif_error *err_blk)
955 {
956 	uint32_t offset_in_block, dst_len, data_block_size;
957 	uint16_t guard = 0;
958 	void *src, *dst;
959 
960 	_dif_sgl_get_buf(src_sgl, &src, NULL);
961 
962 	data_block_size = ctx->block_size - ctx->md_size;
963 
964 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
965 		guard = ctx->guard_seed;
966 	}
967 	offset_in_block = 0;
968 
969 	while (offset_in_block < data_block_size) {
970 		/* Compute CRC over split logical block data and copy
971 		 * data to bounce buffer.
972 		 */
973 		_dif_sgl_get_buf(dst_sgl, &dst, &dst_len);
974 		dst_len = spdk_min(dst_len, data_block_size - offset_in_block);
975 
976 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
977 			guard = spdk_crc16_t10dif_copy(guard, dst,
978 						       src + offset_in_block, dst_len);
979 		} else {
980 			memcpy(dst, src + offset_in_block, dst_len);
981 		}
982 
983 		_dif_sgl_advance(dst_sgl, dst_len);
984 		offset_in_block += dst_len;
985 	}
986 
987 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
988 		guard = spdk_crc16_t10dif(guard, src + data_block_size,
989 					  ctx->guard_interval - data_block_size);
990 	}
991 
992 	_dif_sgl_advance(src_sgl, ctx->block_size);
993 
994 	return _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
995 }
996 
997 static int
998 dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
999 		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1000 		      struct spdk_dif_error *err_blk)
1001 {
1002 	uint32_t offset_blocks;
1003 	int rc;
1004 
1005 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1006 		rc = _dif_verify_copy_split(src_sgl, dst_sgl, offset_blocks, ctx, err_blk);
1007 		if (rc != 0) {
1008 			return rc;
1009 		}
1010 	}
1011 
1012 	return 0;
1013 }
1014 
1015 int
1016 spdk_dif_verify_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
1017 		     uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1018 		     struct spdk_dif_error *err_blk)
1019 {
1020 	struct _dif_sgl src_sgl, dst_sgl;
1021 	uint32_t data_block_size;
1022 
1023 	_dif_sgl_init(&src_sgl, bounce_iov, 1);
1024 	_dif_sgl_init(&dst_sgl, iovs, iovcnt);
1025 
1026 	data_block_size = ctx->block_size - ctx->md_size;
1027 
1028 	if (!_dif_sgl_is_valid(&dst_sgl, data_block_size * num_blocks) ||
1029 	    !_dif_sgl_is_valid(&src_sgl, ctx->block_size * num_blocks)) {
1030 		SPDK_ERRLOG("Sizes of iovec arrays are not valid.\n");
1031 		return -EINVAL;
1032 	}
1033 
1034 	if (_dif_is_disabled(ctx->dif_type)) {
1035 		return 0;
1036 	}
1037 
1038 	if (_dif_sgl_is_bytes_multiple(&dst_sgl, data_block_size)) {
1039 		return dif_verify_copy(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk);
1040 	} else {
1041 		return dif_verify_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk);
1042 	}
1043 }
1044 
1045 static void
1046 _bit_flip(uint8_t *buf, uint32_t flip_bit)
1047 {
1048 	uint8_t byte;
1049 
1050 	byte = *buf;
1051 	byte ^= 1 << flip_bit;
1052 	*buf = byte;
1053 }
1054 
1055 static int
1056 _dif_inject_error(struct _dif_sgl *sgl,
1057 		  uint32_t block_size, uint32_t num_blocks,
1058 		  uint32_t inject_offset_blocks,
1059 		  uint32_t inject_offset_bytes,
1060 		  uint32_t inject_offset_bits)
1061 {
1062 	uint32_t offset_in_block, buf_len;
1063 	void *buf;
1064 
1065 	_dif_sgl_advance(sgl, block_size * inject_offset_blocks);
1066 
1067 	offset_in_block = 0;
1068 
1069 	while (offset_in_block < block_size) {
1070 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
1071 		buf_len = spdk_min(buf_len, block_size - offset_in_block);
1072 
1073 		if (inject_offset_bytes >= offset_in_block &&
1074 		    inject_offset_bytes < offset_in_block + buf_len) {
1075 			buf += inject_offset_bytes - offset_in_block;
1076 			_bit_flip(buf, inject_offset_bits);
1077 			return 0;
1078 		}
1079 
1080 		_dif_sgl_advance(sgl, buf_len);
1081 		offset_in_block += buf_len;
1082 	}
1083 
1084 	return -1;
1085 }
1086 
1087 static int
1088 dif_inject_error(struct _dif_sgl *sgl, uint32_t block_size, uint32_t num_blocks,
1089 		 uint32_t start_inject_bytes, uint32_t inject_range_bytes,
1090 		 uint32_t *inject_offset)
1091 {
1092 	uint32_t inject_offset_blocks, inject_offset_bytes, inject_offset_bits;
1093 	uint32_t offset_blocks;
1094 	int rc;
1095 
1096 	srand(time(0));
1097 
1098 	inject_offset_blocks = rand() % num_blocks;
1099 	inject_offset_bytes = start_inject_bytes + (rand() % inject_range_bytes);
1100 	inject_offset_bits = rand() % 8;
1101 
1102 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1103 		if (offset_blocks == inject_offset_blocks) {
1104 			rc = _dif_inject_error(sgl, block_size, num_blocks,
1105 					       inject_offset_blocks,
1106 					       inject_offset_bytes,
1107 					       inject_offset_bits);
1108 			if (rc == 0) {
1109 				*inject_offset = inject_offset_blocks;
1110 			}
1111 			return rc;
1112 		}
1113 	}
1114 
1115 	return -1;
1116 }
1117 
1118 #define _member_size(type, member)	sizeof(((type *)0)->member)
1119 
1120 int
1121 spdk_dif_inject_error(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
1122 		      const struct spdk_dif_ctx *ctx, uint32_t inject_flags,
1123 		      uint32_t *inject_offset)
1124 {
1125 	struct _dif_sgl sgl;
1126 	int rc;
1127 
1128 	_dif_sgl_init(&sgl, iovs, iovcnt);
1129 
1130 	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
1131 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
1132 		return -EINVAL;
1133 	}
1134 
1135 	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
1136 		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
1137 				      ctx->guard_interval + offsetof(struct spdk_dif, ref_tag),
1138 				      _member_size(struct spdk_dif, ref_tag),
1139 				      inject_offset);
1140 		if (rc != 0) {
1141 			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
1142 			return rc;
1143 		}
1144 	}
1145 
1146 	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
1147 		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
1148 				      ctx->guard_interval + offsetof(struct spdk_dif, app_tag),
1149 				      _member_size(struct spdk_dif, app_tag),
1150 				      inject_offset);
1151 		if (rc != 0) {
1152 			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
1153 			return rc;
1154 		}
1155 	}
1156 	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
1157 		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
1158 				      ctx->guard_interval,
1159 				      _member_size(struct spdk_dif, guard),
1160 				      inject_offset);
1161 		if (rc != 0) {
1162 			SPDK_ERRLOG("Failed to inject error to Guard.\n");
1163 			return rc;
1164 		}
1165 	}
1166 
1167 	if (inject_flags & SPDK_DIF_DATA_ERROR) {
1168 		/* If the DIF information is contained within the last 8 bytes of
1169 		 * metadata, then the CRC covers all metadata bytes up to but excluding
1170 		 * the last 8 bytes. However, error injection does not cover those
1171 		 * metadata bytes because they are not classified as data here.
1172 		 *
1173 		 * Note: Error injection to data block is expected to be detected as
1174 		 * guard error.
1175 		 */
1176 		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
1177 				      0,
1178 				      ctx->block_size - ctx->md_size,
1179 				      inject_offset);
1180 		if (rc != 0) {
1181 			SPDK_ERRLOG("Failed to inject error to data block.\n");
1182 			return rc;
1183 		}
1184 	}
1185 
1186 	return 0;
1187 }
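
/* Usage sketch (illustrative only, typically used by tests): flip one random
 * bit in the Guard field of one randomly chosen block, so that a subsequent
 * spdk_dif_verify() call is expected to report a guard error for the block
 * index returned in inject_offset.
 *
 *	uint32_t inject_offset;
 *
 *	rc = spdk_dif_inject_error(&iov, 1, num_blocks, &ctx,
 *				   SPDK_DIF_GUARD_ERROR, &inject_offset);
 */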
1188 
1189 static void
1190 dix_generate(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1191 	     uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
1192 {
1193 	uint32_t offset_blocks = 0;
1194 	uint16_t guard;
1195 	void *data_buf, *md_buf;
1196 
1197 	while (offset_blocks < num_blocks) {
1198 		_dif_sgl_get_buf(data_sgl, &data_buf, NULL);
1199 		_dif_sgl_get_buf(md_sgl, &md_buf, NULL);
1200 
1201 		guard = 0;
1202 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1203 			guard = spdk_crc16_t10dif(ctx->guard_seed, data_buf, ctx->block_size);
1204 			guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
1205 		}
1206 
1207 		_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);
1208 
1209 		_dif_sgl_advance(data_sgl, ctx->block_size);
1210 		_dif_sgl_advance(md_sgl, ctx->md_size);
1211 		offset_blocks++;
1212 	}
1213 }
1214 
1215 static void
1216 _dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1217 		    uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
1218 {
1219 	uint32_t offset_in_block, data_buf_len;
1220 	uint16_t guard = 0;
1221 	void *data_buf, *md_buf;
1222 
1223 	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);
1224 
1225 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1226 		guard = ctx->guard_seed;
1227 	}
1228 	offset_in_block = 0;
1229 
1230 	while (offset_in_block < ctx->block_size) {
1231 		_dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len);
1232 		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);
1233 
1234 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1235 			guard = spdk_crc16_t10dif(guard, data_buf, data_buf_len);
1236 		}
1237 
1238 		_dif_sgl_advance(data_sgl, data_buf_len);
1239 		offset_in_block += data_buf_len;
1240 	}
1241 
1242 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1243 		guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
1244 	}
1245 
1246 	_dif_sgl_advance(md_sgl, ctx->md_size);
1247 
1248 	_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);
1249 }
1250 
1251 static void
1252 dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1253 		   uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
1254 {
1255 	uint32_t offset_blocks;
1256 
1257 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1258 		_dix_generate_split(data_sgl, md_sgl, offset_blocks, ctx);
1259 	}
1260 }
1261 
1262 int
1263 spdk_dix_generate(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
1264 		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
1265 {
1266 	struct _dif_sgl data_sgl, md_sgl;
1267 
1268 	_dif_sgl_init(&data_sgl, iovs, iovcnt);
1269 	_dif_sgl_init(&md_sgl, md_iov, 1);
1270 
1271 	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
1272 	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
1273 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
1274 		return -EINVAL;
1275 	}
1276 
1277 	if (_dif_is_disabled(ctx->dif_type)) {
1278 		return 0;
1279 	}
1280 
1281 	if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) {
1282 		dix_generate(&data_sgl, &md_sgl, num_blocks, ctx);
1283 	} else {
1284 		dix_generate_split(&data_sgl, &md_sgl, num_blocks, ctx);
1285 	}
1286 
1287 	return 0;
1288 }
1289 
1290 static int
1291 dix_verify(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1292 	   uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1293 	   struct spdk_dif_error *err_blk)
1294 {
1295 	uint32_t offset_blocks = 0;
1296 	uint16_t guard;
1297 	void *data_buf, *md_buf;
1298 	int rc;
1299 
1300 	while (offset_blocks < num_blocks) {
1301 		_dif_sgl_get_buf(data_sgl, &data_buf, NULL);
1302 		_dif_sgl_get_buf(md_sgl, &md_buf, NULL);
1303 
1304 		guard = 0;
1305 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1306 			guard = spdk_crc16_t10dif(ctx->guard_seed, data_buf, ctx->block_size);
1307 			guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
1308 		}
1309 
1310 		rc = _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
1311 		if (rc != 0) {
1312 			return rc;
1313 		}
1314 
1315 		_dif_sgl_advance(data_sgl, ctx->block_size);
1316 		_dif_sgl_advance(md_sgl, ctx->md_size);
1317 		offset_blocks++;
1318 	}
1319 
1320 	return 0;
1321 }
1322 
1323 static int
1324 _dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1325 		  uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
1326 		  struct spdk_dif_error *err_blk)
1327 {
1328 	uint32_t offset_in_block, data_buf_len;
1329 	uint16_t guard = 0;
1330 	void *data_buf, *md_buf;
1331 
1332 	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);
1333 
1334 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1335 		guard = ctx->guard_seed;
1336 	}
1337 	offset_in_block = 0;
1338 
1339 	while (offset_in_block < ctx->block_size) {
1340 		_dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len);
1341 		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);
1342 
1343 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1344 			guard = spdk_crc16_t10dif(guard, data_buf, data_buf_len);
1345 		}
1346 
1347 		_dif_sgl_advance(data_sgl, data_buf_len);
1348 		offset_in_block += data_buf_len;
1349 	}
1350 
1351 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1352 		guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
1353 	}
1354 
1355 	_dif_sgl_advance(md_sgl, ctx->md_size);
1356 
1357 	return _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
1358 }
1359 
1360 static int
1361 dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1362 		 uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1363 		 struct spdk_dif_error *err_blk)
1364 {
1365 	uint32_t offset_blocks;
1366 	int rc;
1367 
1368 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1369 		rc = _dix_verify_split(data_sgl, md_sgl, offset_blocks, ctx, err_blk);
1370 		if (rc != 0) {
1371 			return rc;
1372 		}
1373 	}
1374 
1375 	return 0;
1376 }
1377 
1378 int
1379 spdk_dix_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
1380 		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1381 		struct spdk_dif_error *err_blk)
1382 {
1383 	struct _dif_sgl data_sgl, md_sgl;
1384 
1385 	_dif_sgl_init(&data_sgl, iovs, iovcnt);
1386 	_dif_sgl_init(&md_sgl, md_iov, 1);
1387 
1388 	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
1389 	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
1390 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
1391 		return -EINVAL;
1392 	}
1393 
1394 	if (_dif_is_disabled(ctx->dif_type)) {
1395 		return 0;
1396 	}
1397 
1398 	if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) {
1399 		return dix_verify(&data_sgl, &md_sgl, num_blocks, ctx, err_blk);
1400 	} else {
1401 		return dix_verify_split(&data_sgl, &md_sgl, num_blocks, ctx, err_blk);
1402 	}
1403 }
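
/* Usage sketch (illustrative only): DIX, i.e. separate metadata. The context
 * is assumed to have been initialized with md_interleave = false, so the data
 * iovecs carry block_size bytes per block and the metadata (including DIF)
 * lives in its own buffer of md_size bytes per block.
 *
 *	struct iovec md_iov = {
 *		.iov_base = md_buf,
 *		.iov_len = num_blocks * ctx.md_size,
 *	};
 *
 *	rc = spdk_dix_generate(iovs, iovcnt, &md_iov, num_blocks, &ctx);
 *	...
 *	rc = spdk_dix_verify(iovs, iovcnt, &md_iov, num_blocks, &ctx, &err_blk);
 */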
1404 
1405 int
1406 spdk_dix_inject_error(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
1407 		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1408 		      uint32_t inject_flags, uint32_t *inject_offset)
1409 {
1410 	struct _dif_sgl data_sgl, md_sgl;
1411 	int rc;
1412 
1413 	_dif_sgl_init(&data_sgl, iovs, iovcnt);
1414 	_dif_sgl_init(&md_sgl, md_iov, 1);
1415 
1416 	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
1417 	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
1418 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
1419 		return -EINVAL;
1420 	}
1421 
1422 	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
1423 		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
1424 				      ctx->guard_interval + offsetof(struct spdk_dif, ref_tag),
1425 				      _member_size(struct spdk_dif, ref_tag),
1426 				      inject_offset);
1427 		if (rc != 0) {
1428 			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
1429 			return rc;
1430 		}
1431 	}
1432 
1433 	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
1434 		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
1435 				      ctx->guard_interval + offsetof(struct spdk_dif, app_tag),
1436 				      _member_size(struct spdk_dif, app_tag),
1437 				      inject_offset);
1438 		if (rc != 0) {
1439 			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
1440 			return rc;
1441 		}
1442 	}
1443 
1444 	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
1445 		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
1446 				      ctx->guard_interval,
1447 				      _member_size(struct spdk_dif, guard),
1448 				      inject_offset);
1449 		if (rc != 0) {
1450 			SPDK_ERRLOG("Failed to inject error to Guard.\n");
1451 			return rc;
1452 		}
1453 	}
1454 
1455 	if (inject_flags & SPDK_DIF_DATA_ERROR) {
1456 		/* Note: Error injection to data block is expected to be detected
1457 		 * as guard error.
1458 		 */
1459 		rc = dif_inject_error(&data_sgl, ctx->block_size, num_blocks,
1460 				      0,
1461 				      ctx->block_size,
1462 				      inject_offset);
1463 		if (rc != 0) {
1464 			SPDK_ERRLOG("Failed to inject error to data block.\n");
1465 			return rc;
1466 		}
1467 	}
1468 
1469 	return 0;
1470 }
1471 
1472 static uint32_t
1473 _to_next_boundary(uint32_t offset, uint32_t boundary)
1474 {
1475 	return boundary - (offset % boundary);
1476 }
1477 
1478 static uint32_t
1479 _to_size_with_md(uint32_t size, uint32_t data_block_size, uint32_t block_size)
1480 {
1481 	return (size / data_block_size) * block_size + (size % data_block_size);
1482 }
1483 
1484 int
1485 spdk_dif_set_md_interleave_iovs(struct iovec *iovs, int iovcnt,
1486 				struct iovec *buf_iovs, int buf_iovcnt,
1487 				uint32_t data_offset, uint32_t data_len,
1488 				uint32_t *_mapped_len,
1489 				const struct spdk_dif_ctx *ctx)
1490 {
1491 	uint32_t data_block_size, data_unalign, buf_len, buf_offset, len;
1492 	struct _dif_sgl dif_sgl;
1493 	struct _dif_sgl buf_sgl;
1494 
1495 	if (iovs == NULL || iovcnt == 0 || buf_iovs == NULL || buf_iovcnt == 0) {
1496 		return -EINVAL;
1497 	}
1498 
1499 	data_block_size = ctx->block_size - ctx->md_size;
1500 
1501 	data_unalign = ctx->data_offset % data_block_size;
1502 
1503 	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
1504 				   ctx->block_size);
1505 	buf_len -= data_unalign;
1506 
1507 	_dif_sgl_init(&dif_sgl, iovs, iovcnt);
1508 	_dif_sgl_init(&buf_sgl, buf_iovs, buf_iovcnt);
1509 
1510 	if (!_dif_sgl_is_valid(&buf_sgl, buf_len)) {
1511 		SPDK_ERRLOG("Buffer overflow will occur.\n");
1512 		return -ERANGE;
1513 	}
1514 
1515 	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
1516 	buf_offset -= data_unalign;
1517 
1518 	_dif_sgl_advance(&buf_sgl, buf_offset);
1519 
1520 	while (data_len != 0) {
1521 		len = spdk_min(data_len, _to_next_boundary(ctx->data_offset + data_offset, data_block_size));
1522 		if (!_dif_sgl_append_split(&dif_sgl, &buf_sgl, len)) {
1523 			break;
1524 		}
1525 		_dif_sgl_advance(&buf_sgl, ctx->md_size);
1526 		data_offset += len;
1527 		data_len -= len;
1528 	}
1529 
1530 	if (_mapped_len != NULL) {
1531 		*_mapped_len = dif_sgl.total_size;
1532 	}
1533 
1534 	return iovcnt - dif_sgl.iovcnt;
1535 }
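
/* Usage sketch (illustrative only): build an iovec array that exposes only
 * the data portions of a DIF-interleaved buffer, e.g. so a transport can read
 * or write raw data without touching metadata. "buf_iovs"/"buf_iovcnt"
 * describe the interleaved buffer; "data_offset"/"data_len" select the data
 * range; the return value is the number of entries filled in "iovs".
 *
 *	struct iovec iovs[8];
 *	uint32_t mapped_len;
 *	int iov_used;
 *
 *	iov_used = spdk_dif_set_md_interleave_iovs(iovs, 8, buf_iovs, buf_iovcnt,
 *						   data_offset, data_len,
 *						   &mapped_len, &ctx);
 *	if (iov_used < 0) {
 *		return iov_used;
 *	}
 */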
1536 
1537 static int
1538 _dif_sgl_setup_stream(struct _dif_sgl *sgl, uint32_t *_buf_offset, uint32_t *_buf_len,
1539 		      uint32_t data_offset, uint32_t data_len,
1540 		      const struct spdk_dif_ctx *ctx)
1541 {
1542 	uint32_t data_block_size, data_unalign, buf_len, buf_offset;
1543 
1544 	data_block_size = ctx->block_size - ctx->md_size;
1545 
1546 	data_unalign = ctx->data_offset % data_block_size;
1547 
1548 	/* If the last data block is complete, the DIF for that block is
1549 	 * inserted or verified in this call.
1550 	 */
1551 	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
1552 				   ctx->block_size);
1553 	buf_len -= data_unalign;
1554 
1555 	if (!_dif_sgl_is_valid(sgl, buf_len)) {
1556 		return -ERANGE;
1557 	}
1558 
1559 	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
1560 	buf_offset -= data_unalign;
1561 
1562 	_dif_sgl_advance(sgl, buf_offset);
1563 	buf_len -= buf_offset;
1564 
1565 	buf_offset += data_unalign;
1566 
1567 	*_buf_offset = buf_offset;
1568 	*_buf_len = buf_len;
1569 
1570 	return 0;
1571 }
1572 
1573 int
1574 spdk_dif_generate_stream(struct iovec *iovs, int iovcnt,
1575 			 uint32_t data_offset, uint32_t data_len,
1576 			 struct spdk_dif_ctx *ctx)
1577 {
1578 	uint32_t buf_len = 0, buf_offset = 0;
1579 	uint32_t len, offset_in_block, offset_blocks;
1580 	uint16_t guard = 0;
1581 	struct _dif_sgl sgl;
1582 	int rc;
1583 
1584 	if (iovs == NULL || iovcnt == 0) {
1585 		return -EINVAL;
1586 	}
1587 
1588 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1589 		guard = ctx->last_guard;
1590 	}
1591 
1592 	_dif_sgl_init(&sgl, iovs, iovcnt);
1593 
1594 	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
1595 	if (rc != 0) {
1596 		return rc;
1597 	}
1598 
1599 	while (buf_len != 0) {
1600 		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
1601 		offset_in_block = buf_offset % ctx->block_size;
1602 		offset_blocks = buf_offset / ctx->block_size;
1603 
1604 		guard = _dif_generate_split(&sgl, offset_in_block, len, guard, offset_blocks, ctx);
1605 
1606 		buf_len -= len;
1607 		buf_offset += len;
1608 	}
1609 
1610 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1611 		ctx->last_guard = guard;
1612 	}
1613 
1614 	return 0;
1615 }
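
/* Usage sketch (illustrative only): insert DIF progressively as stream data
 * arrives. Each call covers [data_offset, data_offset + data_len) of the
 * stream; the running guard is carried in ctx->last_guard between calls, so
 * the calls must be made in order and ctx is not const here. "iovs" describes
 * the interleaved destination buffer.
 *
 *	rc = spdk_dif_generate_stream(iovs, iovcnt, 0, first_len, &ctx);
 *	...
 *	rc = spdk_dif_generate_stream(iovs, iovcnt, first_len, second_len, &ctx);
 */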
1616 
1617 int
1618 spdk_dif_verify_stream(struct iovec *iovs, int iovcnt,
1619 		       uint32_t data_offset, uint32_t data_len,
1620 		       struct spdk_dif_ctx *ctx,
1621 		       struct spdk_dif_error *err_blk)
1622 {
1623 	uint32_t buf_len = 0, buf_offset = 0;
1624 	uint32_t len, offset_in_block, offset_blocks;
1625 	uint16_t guard = 0;
1626 	struct _dif_sgl sgl;
1627 	int rc = 0;
1628 
1629 	if (iovs == NULL || iovcnt == 0) {
1630 		return -EINVAL;
1631 	}
1632 
1633 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1634 		guard = ctx->last_guard;
1635 	}
1636 
1637 	_dif_sgl_init(&sgl, iovs, iovcnt);
1638 
1639 	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
1640 	if (rc != 0) {
1641 		return rc;
1642 	}
1643 
1644 	while (buf_len != 0) {
1645 		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
1646 		offset_in_block = buf_offset % ctx->block_size;
1647 		offset_blocks = buf_offset / ctx->block_size;
1648 
1649 		rc = _dif_verify_split(&sgl, offset_in_block, len, &guard, offset_blocks,
1650 				       ctx, err_blk);
1651 		if (rc != 0) {
1652 			goto error;
1653 		}
1654 
1655 		buf_len -= len;
1656 		buf_offset += len;
1657 	}
1658 
1659 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1660 		ctx->last_guard = guard;
1661 	}
1662 error:
1663 	return rc;
1664 }
1665 
1666 int
1667 spdk_dif_update_crc32c_stream(struct iovec *iovs, int iovcnt,
1668 			      uint32_t data_offset, uint32_t data_len,
1669 			      uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
1670 {
1671 	uint32_t buf_len = 0, buf_offset = 0, len, offset_in_block;
1672 	uint32_t crc32c;
1673 	struct _dif_sgl sgl;
1674 	int rc;
1675 
1676 	if (iovs == NULL || iovcnt == 0) {
1677 		return -EINVAL;
1678 	}
1679 
1680 	crc32c = *_crc32c;
1681 	_dif_sgl_init(&sgl, iovs, iovcnt);
1682 
1683 	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
1684 	if (rc != 0) {
1685 		return rc;
1686 	}
1687 
1688 	while (buf_len != 0) {
1689 		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
1690 		offset_in_block = buf_offset % ctx->block_size;
1691 
1692 		crc32c = _dif_update_crc32c_split(&sgl, offset_in_block, len, crc32c, ctx);
1693 
1694 		buf_len -= len;
1695 		buf_offset += len;
1696 	}
1697 
1698 	*_crc32c = crc32c;
1699 
1700 	return 0;
1701 }
1702 
1703 void
1704 spdk_dif_get_range_with_md(uint32_t data_offset, uint32_t data_len,
1705 			   uint32_t *_buf_offset, uint32_t *_buf_len,
1706 			   const struct spdk_dif_ctx *ctx)
1707 {
1708 	uint32_t data_block_size, data_unalign, buf_offset, buf_len;
1709 
1710 	if (!ctx->md_interleave) {
1711 		buf_offset = data_offset;
1712 		buf_len = data_len;
1713 	} else {
1714 		data_block_size = ctx->block_size - ctx->md_size;
1715 
1716 		data_unalign = data_offset % data_block_size;
1717 
1718 		buf_offset = _to_size_with_md(data_offset, data_block_size, ctx->block_size);
1719 		buf_len = _to_size_with_md(data_unalign + data_len, data_block_size, ctx->block_size) -
1720 			  data_unalign;
1721 	}
1722 
1723 	if (_buf_offset != NULL) {
1724 		*_buf_offset = buf_offset;
1725 	}
1726 
1727 	if (_buf_len != NULL) {
1728 		*_buf_len = buf_len;
1729 	}
1730 }
1731 
1732 uint32_t
1733 spdk_dif_get_length_with_md(uint32_t data_len, const struct spdk_dif_ctx *ctx)
1734 {
1735 	uint32_t data_block_size;
1736 
1737 	if (!ctx->md_interleave) {
1738 		return data_len;
1739 	} else {
1740 		data_block_size = ctx->block_size - ctx->md_size;
1741 
1742 		return _to_size_with_md(data_len, data_block_size, ctx->block_size);
1743 	}
1744 }
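
/* Worked example (illustrative note): with block_size = 520 and md_size = 8,
 * data_block_size = 512, so for an interleaved format
 * spdk_dif_get_length_with_md(1000, &ctx) returns
 * (1000 / 512) * 520 + (1000 % 512) = 520 + 488 = 1008, i.e. one full block
 * expanded to include its metadata plus the trailing 488 data bytes.
 */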
1745 
1746 static int
1747 _dif_remap_ref_tag(struct _dif_sgl *sgl, uint32_t offset_blocks,
1748 		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
1749 {
1750 	uint32_t offset, buf_len, expected = 0, _actual, remapped;
1751 	void *buf;
1752 	struct _dif_sgl tmp_sgl;
1753 	struct spdk_dif dif;
1754 
1755 	/* Fast forward to DIF field. */
1756 	_dif_sgl_advance(sgl, ctx->guard_interval);
1757 	_dif_sgl_copy(&tmp_sgl, sgl);
1758 
1759 	/* Copy the split DIF field to the temporary DIF buffer */
1760 	offset = 0;
1761 	while (offset < sizeof(struct spdk_dif)) {
1762 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
1763 		buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset);
1764 
1765 		memcpy((uint8_t *)&dif + offset, buf, buf_len);
1766 
1767 		_dif_sgl_advance(sgl, buf_len);
1768 		offset += buf_len;
1769 	}
1770 
1771 	switch (ctx->dif_type) {
1772 	case SPDK_DIF_TYPE1:
1773 	case SPDK_DIF_TYPE2:
1774 		/* If Type 1 or 2 is used, then all DIF checks are disabled when
1775 		 * the Application Tag is 0xFFFF.
1776 		 */
1777 		if (dif.app_tag == 0xFFFF) {
1778 			goto end;
1779 		}
1780 		break;
1781 	case SPDK_DIF_TYPE3:
1782 		/* If Type 3 is used, then all DIF checks are disabled when the
1783 		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF.
1784 		 */
1785 		if (dif.app_tag == 0xFFFF && dif.ref_tag == 0xFFFFFFFF) {
1786 			goto end;
1787 		}
1788 		break;
1789 	default:
1790 		break;
1791 	}
1792 
1793 	/* For type 1 and 2, the Reference Tag is incremented for each
1794 	 * subsequent logical block. For type 3, the Reference Tag
1795 	 * remains the same as the initial Reference Tag.
1796 	 */
1797 	if (ctx->dif_type != SPDK_DIF_TYPE3) {
1798 		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
1799 		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
1800 	} else {
1801 		remapped = ctx->remapped_init_ref_tag;
1802 	}
1803 
1804 	/* Verify the stored Reference Tag. */
1805 	switch (ctx->dif_type) {
1806 	case SPDK_DIF_TYPE1:
1807 	case SPDK_DIF_TYPE2:
1808 		/* Compare the DIF Reference Tag field to the computed Reference Tag.
1809 		 * The computed Reference Tag will be the least significant 4 bytes
1810 		 * of the LBA when Type 1 is used, and application specific value
1811 		 * if Type 2 is used.
1812 		 */
1813 		_actual = from_be32(&dif.ref_tag);
1814 		if (_actual != expected) {
1815 			_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected,
1816 				       _actual, offset_blocks);
1817 			SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu32 "," \
1818 				    " Expected=%x, Actual=%x\n",
1819 				    expected, expected, _actual);
1820 			return -1;
1821 		}
1822 		break;
1823 	case SPDK_DIF_TYPE3:
1824 		/* For type 3, the computed Reference Tag remains unchanged.
1825 		 * Hence ignore the Reference Tag field.
1826 		 */
1827 		break;
1828 	default:
1829 		break;
1830 	}
1831 
1832 	/* Update the stored Reference Tag to the remapped one. */
1833 	to_be32(&dif.ref_tag, remapped);
1834 
1835 	offset = 0;
1836 	while (offset < sizeof(struct spdk_dif)) {
1837 		_dif_sgl_get_buf(&tmp_sgl, &buf, &buf_len);
1838 		buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset);
1839 
1840 		memcpy(buf, (uint8_t *)&dif + offset, buf_len);
1841 
1842 		_dif_sgl_advance(&tmp_sgl, buf_len);
1843 		offset += buf_len;
1844 	}
1845 
1846 end:
1847 	_dif_sgl_advance(sgl, ctx->block_size - ctx->guard_interval - sizeof(struct spdk_dif));
1848 
1849 	return 0;
1850 }
1851 
1852 int
1853 spdk_dif_remap_ref_tag(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
1854 		       const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
1855 {
1856 	struct _dif_sgl sgl;
1857 	uint32_t offset_blocks;
1858 	int rc;
1859 
1860 	_dif_sgl_init(&sgl, iovs, iovcnt);
1861 
1862 	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
1863 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
1864 		return -EINVAL;
1865 	}
1866 
1867 	if (_dif_is_disabled(ctx->dif_type)) {
1868 		return 0;
1869 	}
1870 
1871 	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
1872 		return 0;
1873 	}
1874 
1875 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1876 		rc = _dif_remap_ref_tag(&sgl, offset_blocks, ctx, err_blk);
1877 		if (rc != 0) {
1878 			return rc;
1879 		}
1880 	}
1881 
1882 	return 0;
1883 }
1884 
1885 static int
1886 _dix_remap_ref_tag(struct _dif_sgl *md_sgl, uint32_t offset_blocks,
1887 		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
1888 {
1889 	uint32_t expected = 0, _actual, remapped;
1890 	uint8_t *md_buf;
1891 	struct spdk_dif *dif;
1892 
1893 	_dif_sgl_get_buf(md_sgl, (void *)&md_buf, NULL);
1894 
1895 	dif = (struct spdk_dif *)(md_buf + ctx->guard_interval);
1896 
1897 	switch (ctx->dif_type) {
1898 	case SPDK_DIF_TYPE1:
1899 	case SPDK_DIF_TYPE2:
1900 		/* If Type 1 or 2 is used, then all DIF checks are disabled when
1901 		 * the Application Tag is 0xFFFF.
1902 		 */
1903 		if (dif->app_tag == 0xFFFF) {
1904 			goto end;
1905 		}
1906 		break;
1907 	case SPDK_DIF_TYPE3:
1908 		/* If Type 3 is used, then all DIF checks are disabled when the
1909 		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF.
1910 		 */
1911 		if (dif->app_tag == 0xFFFF && dif->ref_tag == 0xFFFFFFFF) {
1912 			goto end;
1913 		}
1914 		break;
1915 	default:
1916 		break;
1917 	}
1918 
1919 	/* For type 1 and 2, the Reference Tag is incremented for each
1920 	 * subsequent logical block. For type 3, the Reference Tag
1921 	 * remains the same as the initialReference Tag.
1922 	 * remains the same as the initial Reference Tag.
1923 	if (ctx->dif_type != SPDK_DIF_TYPE3) {
1924 		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
1925 		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
1926 	} else {
1927 		remapped = ctx->remapped_init_ref_tag;
1928 	}
1929 
1930 	/* Verify the stored Reference Tag. */
1931 	switch (ctx->dif_type) {
1932 	case SPDK_DIF_TYPE1:
1933 	case SPDK_DIF_TYPE2:
1934 		/* Compare the DIF Reference Tag field to the computed Reference Tag.
1935 		 * The computed Reference Tag will be the least significant 4 bytes
1936 		 * of the LBA when Type 1 is used, and application specific value
1937 		 * if Type 2 is used.
1938 		 */
1939 		_actual = from_be32(&dif->ref_tag);
1940 		if (_actual != expected) {
1941 			_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected,
1942 				       _actual, offset_blocks);
1943 			SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu32 "," \
1944 				    " Expected=%x, Actual=%x\n",
1945 				    expected, expected, _actual);
1946 			return -1;
1947 		}
1948 		break;
1949 	case SPDK_DIF_TYPE3:
1950 		/* For type 3, the computed Reference Tag remains unchanged.
1951 		 * Hence ignore the Reference Tag field.
1952 		 */
1953 		break;
1954 	default:
1955 		break;
1956 	}
1957 
1958 	/* Update the stored Reference Tag to the remapped one. */
1959 	to_be32(&dif->ref_tag, remapped);
1960 
1961 end:
1962 	_dif_sgl_advance(md_sgl, ctx->md_size);
1963 
1964 	return 0;
1965 }
1966 
1967 int
1968 spdk_dix_remap_ref_tag(struct iovec *md_iov, uint32_t num_blocks,
1969 		       const struct spdk_dif_ctx *ctx,
1970 		       struct spdk_dif_error *err_blk)
1971 {
1972 	struct _dif_sgl md_sgl;
1973 	uint32_t offset_blocks;
1974 	int rc;
1975 
1976 	_dif_sgl_init(&md_sgl, md_iov, 1);
1977 
1978 	if (!_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
1979 		SPDK_ERRLOG("Size of metadata iovec array is not valid.\n");
1980 		return -EINVAL;
1981 	}
1982 
1983 	if (_dif_is_disabled(ctx->dif_type)) {
1984 		return 0;
1985 	}
1986 
1987 	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
1988 		return 0;
1989 	}
1990 
1991 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1992 		rc = _dix_remap_ref_tag(&md_sgl, offset_blocks, ctx, err_blk);
1993 		if (rc != 0) {
1994 			return rc;
1995 		}
1996 	}
1997 
1998 	return 0;
1999 }
2000