xref: /spdk/lib/util/dif.c (revision c39647df83e4be9bcc49025132c48bf2414ef8b1)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/dif.h"
35 #include "spdk/crc16.h"
36 #include "spdk/crc32.h"
37 #include "spdk/endian.h"
38 #include "spdk/log.h"
39 #include "spdk/util.h"
40 
41 /* Context for iterating over or constructing an iovec array.
42  * A given SGL is either iterated over or constructed at a time, not both.
43  */
44 struct _dif_sgl {
45 	/* Current iovec in the iteration or creation */
46 	struct iovec *iov;
47 
48 	/* Remaining count of iovecs in the iteration or creation. */
49 	int iovcnt;
50 
51 	/* Current offset in the iovec */
52 	uint32_t iov_offset;
53 
54 	/* Size of the created iovec array in bytes */
55 	uint32_t total_size;
56 };
57 
58 static inline void
59 _dif_sgl_init(struct _dif_sgl *s, struct iovec *iovs, int iovcnt)
60 {
61 	s->iov = iovs;
62 	s->iovcnt = iovcnt;
63 	s->iov_offset = 0;
64 	s->total_size = 0;
65 }
66 
67 static void
68 _dif_sgl_advance(struct _dif_sgl *s, uint32_t step)
69 {
70 	s->iov_offset += step;
71 	while (s->iovcnt != 0) {
72 		if (s->iov_offset < s->iov->iov_len) {
73 			break;
74 		}
75 
76 		s->iov_offset -= s->iov->iov_len;
77 		s->iov++;
78 		s->iovcnt--;
79 	}
80 }
81 
82 static inline void
83 _dif_sgl_get_buf(struct _dif_sgl *s, void **_buf, uint32_t *_buf_len)
84 {
85 	if (_buf != NULL) {
86 		*_buf = s->iov->iov_base + s->iov_offset;
87 	}
88 	if (_buf_len != NULL) {
89 		*_buf_len = s->iov->iov_len - s->iov_offset;
90 	}
91 }
92 
93 static inline bool
94 _dif_sgl_append(struct _dif_sgl *s, uint8_t *data, uint32_t data_len)
95 {
96 	assert(s->iovcnt > 0);
97 	s->iov->iov_base = data;
98 	s->iov->iov_len = data_len;
99 	s->total_size += data_len;
100 	s->iov++;
101 	s->iovcnt--;
102 
103 	if (s->iovcnt > 0) {
104 		return true;
105 	} else {
106 		return false;
107 	}
108 }
109 
110 static inline bool
111 _dif_sgl_append_split(struct _dif_sgl *dst, struct _dif_sgl *src, uint32_t data_len)
112 {
113 	uint8_t *buf;
114 	uint32_t buf_len;
115 
116 	while (data_len != 0) {
117 		_dif_sgl_get_buf(src, (void *)&buf, &buf_len);
118 		buf_len = spdk_min(buf_len, data_len);
119 
120 		if (!_dif_sgl_append(dst, buf, buf_len)) {
121 			return false;
122 		}
123 
124 		_dif_sgl_advance(src, buf_len);
125 		data_len -= buf_len;
126 	}
127 
128 	return true;
129 }
130 
131 /* This function must be used before starting iteration. */
132 static bool
133 _dif_sgl_is_bytes_multiple(struct _dif_sgl *s, uint32_t bytes)
134 {
135 	int i;
136 
137 	for (i = 0; i < s->iovcnt; i++) {
138 		if (s->iov[i].iov_len % bytes) {
139 			return false;
140 		}
141 	}
142 
143 	return true;
144 }
145 
146 /* This function must be used before starting iteration. */
147 static bool
148 _dif_sgl_is_valid(struct _dif_sgl *s, uint32_t bytes)
149 {
150 	uint64_t total = 0;
151 	int i;
152 
153 	for (i = 0; i < s->iovcnt; i++) {
154 		total += s->iov[i].iov_len;
155 	}
156 
157 	return total >= bytes;
158 }
159 
160 static void
161 _dif_sgl_copy(struct _dif_sgl *to, struct _dif_sgl *from)
162 {
163 	memcpy(to, from, sizeof(struct _dif_sgl));
164 }
165 
166 static bool
167 _dif_type_is_valid(enum spdk_dif_type dif_type, uint32_t dif_flags)
168 {
169 	switch (dif_type) {
170 	case SPDK_DIF_TYPE1:
171 	case SPDK_DIF_TYPE2:
172 	case SPDK_DIF_DISABLE:
173 		break;
174 	case SPDK_DIF_TYPE3:
175 		if (dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
176 			SPDK_ERRLOG("Reference Tag should not be checked for Type 3\n");
177 			return false;
178 		}
179 		break;
180 	default:
181 		SPDK_ERRLOG("Unknown DIF Type: %d\n", dif_type);
182 		return false;
183 	}
184 
185 	return true;
186 }
187 
188 static bool
189 _dif_is_disabled(enum spdk_dif_type dif_type)
190 {
191 	if (dif_type == SPDK_DIF_DISABLE) {
192 		return true;
193 	} else {
194 		return false;
195 	}
196 }
197 
198 
199 static uint32_t
200 _get_guard_interval(uint32_t block_size, uint32_t md_size, bool dif_loc, bool md_interleave)
201 {
202 	if (!dif_loc) {
203 		/* For metadata formats with more than 8 bytes, if the DIF is
204 		 * contained in the last 8 bytes of metadata, then the CRC
205 		 * covers all metadata up to but excluding these last 8 bytes.
206 		 */
207 		if (md_interleave) {
208 			return block_size - sizeof(struct spdk_dif);
209 		} else {
210 			return md_size - sizeof(struct spdk_dif);
211 		}
212 	} else {
213 		/* For metadata formats with more than 8 bytes, if the DIF is
214 		 * contained in the first 8 bytes of metadata, then the CRC
215 		 * does not cover any metadata.
216 		 */
217 		if (md_interleave) {
218 			return block_size - md_size;
219 		} else {
220 			return 0;
221 		}
222 	}
223 }
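
/* Illustrative example (hypothetical values, not part of the library code):
 * for an interleaved 512 + 16 format (block_size = 528, md_size = 16),
 *
 *   dif_loc == false (DIF in the last 8 bytes of metadata):
 *     guard_interval = 528 - sizeof(struct spdk_dif) = 520, so the guard CRC
 *     covers the 512 data bytes plus the first 8 bytes of metadata.
 *   dif_loc == true (DIF in the first 8 bytes of metadata):
 *     guard_interval = 528 - 16 = 512, so the guard CRC covers only the data.
 *
 * In the separate-metadata case (md_interleave == false) the same choice
 * yields 8 and 0 respectively, i.e. the offset of the DIF field within the
 * 16-byte metadata buffer.
 */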
224 
225 int
226 spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
227 		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
228 		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
229 		  uint32_t data_offset, uint16_t guard_seed)
230 {
231 	uint32_t data_block_size;
232 
233 	if (md_size < sizeof(struct spdk_dif)) {
234 		SPDK_ERRLOG("Metadata size is smaller than DIF size.\n");
235 		return -EINVAL;
236 	}
237 
238 	if (md_interleave) {
239 		if (block_size < md_size) {
240 			SPDK_ERRLOG("Block size is smaller than metadata size.\n");
241 			return -EINVAL;
242 		}
243 		data_block_size = block_size - md_size;
244 	} else {
245 		if (block_size == 0 || (block_size % 512) != 0) {
246 			SPDK_ERRLOG("Block size must be a nonzero multiple of 512.\n");
247 			return -EINVAL;
248 		}
249 		data_block_size = block_size;
250 	}
251 
252 	if (!_dif_type_is_valid(dif_type, dif_flags)) {
253 		SPDK_ERRLOG("DIF type is invalid.\n");
254 		return -EINVAL;
255 	}
256 
257 	ctx->block_size = block_size;
258 	ctx->md_size = md_size;
259 	ctx->md_interleave = md_interleave;
260 	ctx->guard_interval = _get_guard_interval(block_size, md_size, dif_loc, md_interleave);
261 	ctx->dif_type = dif_type;
262 	ctx->dif_flags = dif_flags;
263 	ctx->init_ref_tag = init_ref_tag;
264 	ctx->apptag_mask = apptag_mask;
265 	ctx->app_tag = app_tag;
266 	ctx->data_offset = data_offset;
267 	ctx->ref_tag_offset = data_offset / data_block_size;
268 	ctx->last_guard = guard_seed;
269 	ctx->guard_seed = guard_seed;
270 	ctx->remapped_init_ref_tag = 0;
271 
272 	return 0;
273 }
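
/* Illustrative usage sketch (hypothetical values, not part of this file):
 * set up a context for a 512 + 8 interleaved Type 1 format, all checks
 * enabled, DIF in the last 8 bytes of metadata, starting at LBA 10.
 *
 *	struct spdk_dif_ctx ctx;
 *	uint32_t dif_flags = SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK |
 *			     SPDK_DIF_FLAGS_REFTAG_CHECK;
 *	int rc;
 *
 *	rc = spdk_dif_ctx_init(&ctx, 520, 8, true, false, SPDK_DIF_TYPE1, dif_flags,
 *			       10, 0xFFFF, 0x88, 0, 0);
 *	if (rc != 0) {
 *		return rc;
 *	}
 */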
274 
275 void
276 spdk_dif_ctx_set_data_offset(struct spdk_dif_ctx *ctx, uint32_t data_offset)
277 {
278 	uint32_t data_block_size;
279 
280 	if (ctx->md_interleave) {
281 		data_block_size = ctx->block_size - ctx->md_size;
282 	} else {
283 		data_block_size = ctx->block_size;
284 	}
285 
286 	ctx->data_offset = data_offset;
287 	ctx->ref_tag_offset = data_offset / data_block_size;
288 }
289 
290 void
291 spdk_dif_ctx_set_remapped_init_ref_tag(struct spdk_dif_ctx *ctx,
292 				       uint32_t remapped_init_ref_tag)
293 {
294 	ctx->remapped_init_ref_tag = remapped_init_ref_tag;
295 }
296 
297 static void
298 _dif_generate(void *_dif, uint16_t guard, uint32_t offset_blocks,
299 	      const struct spdk_dif_ctx *ctx)
300 {
301 	struct spdk_dif *dif = _dif;
302 	uint32_t ref_tag;
303 
304 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
305 		to_be16(&dif->guard, guard);
306 	}
307 
308 	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
309 		to_be16(&dif->app_tag, ctx->app_tag);
310 	}
311 
312 	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
313 		/* For type 1 and 2, the reference tag is incremented for each
314 		 * subsequent logical block. For type 3, the reference tag
315 		 * remains the same as the initial reference tag.
316 		 */
317 		if (ctx->dif_type != SPDK_DIF_TYPE3) {
318 			ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
319 		} else {
320 			ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset;
321 		}
322 
323 		to_be32(&dif->ref_tag, ref_tag);
324 	}
325 }
326 
327 static void
328 dif_generate(struct _dif_sgl *sgl, uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
329 {
330 	uint32_t offset_blocks = 0;
331 	void *buf;
332 	uint16_t guard = 0;
333 
334 	while (offset_blocks < num_blocks) {
335 		_dif_sgl_get_buf(sgl, &buf, NULL);
336 
337 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
338 			guard = spdk_crc16_t10dif(ctx->guard_seed, buf, ctx->guard_interval);
339 		}
340 
341 		_dif_generate(buf + ctx->guard_interval, guard, offset_blocks, ctx);
342 
343 		_dif_sgl_advance(sgl, ctx->block_size);
344 		offset_blocks++;
345 	}
346 }
347 
348 static uint16_t
349 _dif_generate_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
350 		    uint16_t guard, uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
351 {
352 	uint32_t offset_in_dif, buf_len;
353 	void *buf;
354 	struct spdk_dif dif = {};
355 
356 	assert(offset_in_block < ctx->guard_interval);
357 	assert(offset_in_block + data_len < ctx->guard_interval ||
358 	       offset_in_block + data_len == ctx->block_size);
359 
360 	/* Compute CRC over split logical block data. */
361 	while (data_len != 0 && offset_in_block < ctx->guard_interval) {
362 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
363 		buf_len = spdk_min(buf_len, data_len);
364 		buf_len = spdk_min(buf_len, ctx->guard_interval - offset_in_block);
365 
366 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
367 			guard = spdk_crc16_t10dif(guard, buf, buf_len);
368 		}
369 
370 		_dif_sgl_advance(sgl, buf_len);
371 		offset_in_block += buf_len;
372 		data_len -= buf_len;
373 	}
374 
375 	if (offset_in_block < ctx->guard_interval) {
376 		return guard;
377 	}
378 
379 	/* Once a whole logical block of data has been parsed, generate the DIF
380 	 * and save it to the temporary DIF area.
381 	 */
382 	_dif_generate(&dif, guard, offset_blocks, ctx);
383 
384 	/* Copy generated DIF field to the split DIF field, and then
385 	 * skip metadata field after DIF field (if any).
386 	 */
387 	while (offset_in_block < ctx->block_size) {
388 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
389 
390 		if (offset_in_block < ctx->guard_interval + sizeof(struct spdk_dif)) {
391 			offset_in_dif = offset_in_block - ctx->guard_interval;
392 			buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset_in_dif);
393 
394 			memcpy(buf, ((uint8_t *)&dif) + offset_in_dif, buf_len);
395 		} else {
396 			buf_len = spdk_min(buf_len, ctx->block_size - offset_in_block);
397 		}
398 
399 		_dif_sgl_advance(sgl, buf_len);
400 		offset_in_block += buf_len;
401 	}
402 
403 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
404 		guard = ctx->guard_seed;
405 	}
406 
407 	return guard;
408 }
409 
410 static void
411 dif_generate_split(struct _dif_sgl *sgl, uint32_t num_blocks,
412 		   const struct spdk_dif_ctx *ctx)
413 {
414 	uint32_t offset_blocks;
415 	uint16_t guard = 0;
416 
417 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
418 		guard = ctx->guard_seed;
419 	}
420 
421 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
422 		_dif_generate_split(sgl, 0, ctx->block_size, guard, offset_blocks, ctx);
423 	}
424 }
425 
426 int
427 spdk_dif_generate(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
428 		  const struct spdk_dif_ctx *ctx)
429 {
430 	struct _dif_sgl sgl;
431 
432 	_dif_sgl_init(&sgl, iovs, iovcnt);
433 
434 	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
435 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
436 		return -EINVAL;
437 	}
438 
439 	if (_dif_is_disabled(ctx->dif_type)) {
440 		return 0;
441 	}
442 
443 	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
444 		dif_generate(&sgl, num_blocks, ctx);
445 	} else {
446 		dif_generate_split(&sgl, num_blocks, ctx);
447 	}
448 
449 	return 0;
450 }
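
/* Illustrative usage sketch (hypothetical buffer and values, not part of this
 * file): generate and then verify DIF for four 520-byte interleaved blocks
 * described by a single iovec, with a context set up as in the sketch above.
 *
 *	uint8_t buf[4 * 520];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct spdk_dif_error err_blk;
 *	int rc;
 *
 *	rc = spdk_dif_generate(&iov, 1, 4, &ctx);
 *	if (rc == 0) {
 *		rc = spdk_dif_verify(&iov, 1, 4, &ctx, &err_blk);
 *	}
 */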
451 
452 static void
453 _dif_error_set(struct spdk_dif_error *err_blk, uint8_t err_type,
454 	       uint32_t expected, uint32_t actual, uint32_t err_offset)
455 {
456 	if (err_blk) {
457 		err_blk->err_type = err_type;
458 		err_blk->expected = expected;
459 		err_blk->actual = actual;
460 		err_blk->err_offset = err_offset;
461 	}
462 }
463 
464 static int
465 _dif_verify(void *_dif, uint16_t guard, uint32_t offset_blocks,
466 	    const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
467 {
468 	struct spdk_dif *dif = _dif;
469 	uint16_t _guard;
470 	uint16_t _app_tag;
471 	uint32_t ref_tag, _ref_tag;
472 
473 	switch (ctx->dif_type) {
474 	case SPDK_DIF_TYPE1:
475 	case SPDK_DIF_TYPE2:
476 		/* If Type 1 or 2 is used, then all DIF checks are disabled when
477 		 * the Application Tag is 0xFFFF.
478 		 */
479 		if (dif->app_tag == 0xFFFF) {
480 			return 0;
481 		}
482 		break;
483 	case SPDK_DIF_TYPE3:
484 		/* If Type 3 is used, then all DIF checks are disabled when the
485 		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF.
486 		 */
487 		if (dif->app_tag == 0xFFFF && dif->ref_tag == 0xFFFFFFFF) {
488 			return 0;
489 		}
490 		break;
491 	default:
492 		break;
493 	}
494 
495 	/* For type 1 and 2, the reference tag is incremented for each
496 	 * subsequent logical block. For type 3, the reference tag
497 	 * remains the same as the initial reference tag.
498 	 */
499 	if (ctx->dif_type != SPDK_DIF_TYPE3) {
500 		ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
501 	} else {
502 		ref_tag = ctx->init_ref_tag + ctx->ref_tag_offset;
503 	}
504 
505 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
506 		/* Compare the DIF Guard field to the CRC computed over the logical
507 		 * block data.
508 		 */
509 		_guard = from_be16(&dif->guard);
510 		if (_guard != guard) {
511 			_dif_error_set(err_blk, SPDK_DIF_GUARD_ERROR, _guard, guard,
512 				       offset_blocks);
513 			SPDK_ERRLOG("Failed to compare Guard: LBA=%" PRIu32 "," \
514 				    "  Expected=%x, Actual=%x\n",
515 				    ref_tag, _guard, guard);
516 			return -1;
517 		}
518 	}
519 
520 	if (ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) {
521 		/* Compare unmasked bits in the DIF Application Tag field to the
522 		 * passed Application Tag.
523 		 */
524 		_app_tag = from_be16(&dif->app_tag);
525 		if ((_app_tag & ctx->apptag_mask) != ctx->app_tag) {
526 			_dif_error_set(err_blk, SPDK_DIF_APPTAG_ERROR, ctx->app_tag,
527 				       (_app_tag & ctx->apptag_mask), offset_blocks);
528 			SPDK_ERRLOG("Failed to compare App Tag: LBA=%" PRIu32 "," \
529 				    "  Expected=%x, Actual=%x\n",
530 				    ref_tag, ctx->app_tag, (_app_tag & ctx->apptag_mask));
531 			return -1;
532 		}
533 	}
534 
535 	if (ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) {
536 		switch (ctx->dif_type) {
537 		case SPDK_DIF_TYPE1:
538 		case SPDK_DIF_TYPE2:
539 			/* Compare the DIF Reference Tag field to the passed Reference Tag.
540 			 * The passed Reference Tag will be the least significant 4 bytes
541 			 * of the LBA when Type 1 is used, and application specific value
542 			 * of the LBA when Type 1 is used, and an application specific value
543 			 * if Type 2 is used.
544 			_ref_tag = from_be32(&dif->ref_tag);
545 			if (_ref_tag != ref_tag) {
546 				_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, ref_tag,
547 					       _ref_tag, offset_blocks);
548 				SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu32 "," \
549 					    " Expected=%x, Actual=%x\n",
550 					    ref_tag, ref_tag, _ref_tag);
551 				return -1;
552 			}
553 			break;
554 		case SPDK_DIF_TYPE3:
555 			/* For Type 3, computed Reference Tag remains unchanged.
556 			 * Hence ignore the Reference Tag field.
557 			 */
558 			break;
559 		default:
560 			break;
561 		}
562 	}
563 
564 	return 0;
565 }
566 
567 static int
568 dif_verify(struct _dif_sgl *sgl, uint32_t num_blocks,
569 	   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
570 {
571 	uint32_t offset_blocks = 0;
572 	int rc;
573 	void *buf;
574 	uint16_t guard = 0;
575 
576 	while (offset_blocks < num_blocks) {
577 		_dif_sgl_get_buf(sgl, &buf, NULL);
578 
579 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
580 			guard = spdk_crc16_t10dif(ctx->guard_seed, buf, ctx->guard_interval);
581 		}
582 
583 		rc = _dif_verify(buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
584 		if (rc != 0) {
585 			return rc;
586 		}
587 
588 		_dif_sgl_advance(sgl, ctx->block_size);
589 		offset_blocks++;
590 	}
591 
592 	return 0;
593 }
594 
595 static int
596 _dif_verify_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
597 		  uint16_t *_guard, uint32_t offset_blocks,
598 		  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
599 {
600 	uint32_t offset_in_dif, buf_len;
601 	void *buf;
602 	uint16_t guard;
603 	struct spdk_dif dif = {};
604 	int rc;
605 
606 	assert(_guard != NULL);
607 	assert(offset_in_block < ctx->guard_interval);
608 	assert(offset_in_block + data_len < ctx->guard_interval ||
609 	       offset_in_block + data_len == ctx->block_size);
610 
611 	guard = *_guard;
612 
613 	/* Compute CRC over split logical block data. */
614 	while (data_len != 0 && offset_in_block < ctx->guard_interval) {
615 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
616 		buf_len = spdk_min(buf_len, data_len);
617 		buf_len = spdk_min(buf_len, ctx->guard_interval - offset_in_block);
618 
619 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
620 			guard = spdk_crc16_t10dif(guard, buf, buf_len);
621 		}
622 
623 		_dif_sgl_advance(sgl, buf_len);
624 		offset_in_block += buf_len;
625 		data_len -= buf_len;
626 	}
627 
628 	if (offset_in_block < ctx->guard_interval) {
629 		*_guard = guard;
630 		return 0;
631 	}
632 
633 	/* Copy the split DIF field to the temporary DIF buffer, and then
634 	 * skip metadata field after DIF field (if any). */
635 	while (offset_in_block < ctx->block_size) {
636 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
637 
638 		if (offset_in_block < ctx->guard_interval + sizeof(struct spdk_dif)) {
639 			offset_in_dif = offset_in_block - ctx->guard_interval;
640 			buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset_in_dif);
641 
642 			memcpy((uint8_t *)&dif + offset_in_dif, buf, buf_len);
643 		} else {
644 			buf_len = spdk_min(buf_len, ctx->block_size - offset_in_block);
645 		}
646 		_dif_sgl_advance(sgl, buf_len);
647 		offset_in_block += buf_len;
648 	}
649 
650 	rc = _dif_verify(&dif, guard, offset_blocks, ctx, err_blk);
651 	if (rc != 0) {
652 		return rc;
653 	}
654 
655 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
656 		guard = ctx->guard_seed;
657 	}
658 
659 	*_guard = guard;
660 	return 0;
661 }
662 
663 static int
664 dif_verify_split(struct _dif_sgl *sgl, uint32_t num_blocks,
665 		 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
666 {
667 	uint32_t offset_blocks;
668 	uint16_t guard = 0;
669 	int rc;
670 
671 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
672 		guard = ctx->guard_seed;
673 	}
674 
675 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
676 		rc = _dif_verify_split(sgl, 0, ctx->block_size, &guard, offset_blocks,
677 				       ctx, err_blk);
678 		if (rc != 0) {
679 			return rc;
680 		}
681 	}
682 
683 	return 0;
684 }
685 
686 int
687 spdk_dif_verify(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
688 		const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
689 {
690 	struct _dif_sgl sgl;
691 
692 	_dif_sgl_init(&sgl, iovs, iovcnt);
693 
694 	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
695 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
696 		return -EINVAL;
697 	}
698 
699 	if (_dif_is_disabled(ctx->dif_type)) {
700 		return 0;
701 	}
702 
703 	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
704 		return dif_verify(&sgl, num_blocks, ctx, err_blk);
705 	} else {
706 		return dif_verify_split(&sgl, num_blocks, ctx, err_blk);
707 	}
708 }
709 
710 static uint32_t
711 dif_update_crc32c(struct _dif_sgl *sgl, uint32_t num_blocks,
712 		  uint32_t crc32c,  const struct spdk_dif_ctx *ctx)
713 {
714 	uint32_t offset_blocks;
715 	void *buf;
716 
717 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
718 		_dif_sgl_get_buf(sgl, &buf, NULL);
719 
720 		crc32c = spdk_crc32c_update(buf, ctx->block_size - ctx->md_size, crc32c);
721 
722 		_dif_sgl_advance(sgl, ctx->block_size);
723 	}
724 
725 	return crc32c;
726 }
727 
728 static uint32_t
729 _dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t offset_in_block, uint32_t data_len,
730 			 uint32_t crc32c, const struct spdk_dif_ctx *ctx)
731 {
732 	uint32_t data_block_size, buf_len;
733 	void *buf;
734 
735 	data_block_size = ctx->block_size - ctx->md_size;
736 
737 	assert(offset_in_block + data_len <= ctx->block_size);
738 
739 	while (data_len != 0) {
740 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
741 		buf_len = spdk_min(buf_len, data_len);
742 
743 		if (offset_in_block < data_block_size) {
744 			buf_len = spdk_min(buf_len, data_block_size - offset_in_block);
745 			crc32c = spdk_crc32c_update(buf, buf_len, crc32c);
746 		}
747 
748 		_dif_sgl_advance(sgl, buf_len);
749 		offset_in_block += buf_len;
750 		data_len -= buf_len;
751 	}
752 
753 	return crc32c;
754 }
755 
756 static uint32_t
757 dif_update_crc32c_split(struct _dif_sgl *sgl, uint32_t num_blocks,
758 			uint32_t crc32c, const struct spdk_dif_ctx *ctx)
759 {
760 	uint32_t offset_blocks;
761 
762 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
763 		crc32c = _dif_update_crc32c_split(sgl, 0, ctx->block_size, crc32c, ctx);
764 	}
765 
766 	return crc32c;
767 }
768 
769 int
770 spdk_dif_update_crc32c(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
771 		       uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
772 {
773 	struct _dif_sgl sgl;
774 
775 	if (_crc32c == NULL) {
776 		return -EINVAL;
777 	}
778 
779 	_dif_sgl_init(&sgl, iovs, iovcnt);
780 
781 	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
782 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
783 		return -EINVAL;
784 	}
785 
786 	if (_dif_sgl_is_bytes_multiple(&sgl, ctx->block_size)) {
787 		*_crc32c = dif_update_crc32c(&sgl, num_blocks, *_crc32c, ctx);
788 	} else {
789 		*_crc32c = dif_update_crc32c_split(&sgl, num_blocks, *_crc32c, ctx);
790 	}
791 
792 	return 0;
793 }
794 
795 static void
796 dif_generate_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
797 		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
798 {
799 	uint32_t offset_blocks = 0, data_block_size;
800 	void *src, *dst;
801 	uint16_t guard;
802 
803 	data_block_size = ctx->block_size - ctx->md_size;
804 
805 	while (offset_blocks < num_blocks) {
806 		_dif_sgl_get_buf(src_sgl, &src, NULL);
807 		_dif_sgl_get_buf(dst_sgl, &dst, NULL);
808 
809 		guard = 0;
810 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
811 			guard = spdk_crc16_t10dif_copy(ctx->guard_seed, dst, src, data_block_size);
812 			guard = spdk_crc16_t10dif(guard, dst + data_block_size,
813 						  ctx->guard_interval - data_block_size);
814 		} else {
815 			memcpy(dst, src, data_block_size);
816 		}
817 
818 		_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);
819 
820 		_dif_sgl_advance(src_sgl, data_block_size);
821 		_dif_sgl_advance(dst_sgl, ctx->block_size);
822 		offset_blocks++;
823 	}
824 }
825 
826 static void
827 _dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
828 			 uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
829 {
830 	uint32_t offset_in_block, src_len, data_block_size;
831 	uint16_t guard = 0;
832 	void *src, *dst;
833 
834 	_dif_sgl_get_buf(dst_sgl, &dst, NULL);
835 
836 	data_block_size = ctx->block_size - ctx->md_size;
837 
838 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
839 		guard = ctx->guard_seed;
840 	}
841 	offset_in_block = 0;
842 
843 	while (offset_in_block < data_block_size) {
844 		/* Compute CRC over split logical block data and copy
845 		 * data to bounce buffer.
846 		 */
847 		_dif_sgl_get_buf(src_sgl, &src, &src_len);
848 		src_len = spdk_min(src_len, data_block_size - offset_in_block);
849 
850 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
851 			guard = spdk_crc16_t10dif_copy(guard, dst + offset_in_block,
852 						       src, src_len);
853 		} else {
854 			memcpy(dst + offset_in_block, src, src_len);
855 		}
856 
857 		_dif_sgl_advance(src_sgl, src_len);
858 		offset_in_block += src_len;
859 	}
860 
861 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
862 		guard = spdk_crc16_t10dif(guard, dst + data_block_size,
863 					  ctx->guard_interval - data_block_size);
864 	}
865 
866 	_dif_sgl_advance(dst_sgl, ctx->block_size);
867 
868 	_dif_generate(dst + ctx->guard_interval, guard, offset_blocks, ctx);
869 }
870 
871 static void
872 dif_generate_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
873 			uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
874 {
875 	uint32_t offset_blocks;
876 
877 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
878 		_dif_generate_copy_split(src_sgl, dst_sgl, offset_blocks, ctx);
879 	}
880 }
881 
882 int
883 spdk_dif_generate_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
884 		       uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
885 {
886 	struct _dif_sgl src_sgl, dst_sgl;
887 	uint32_t data_block_size;
888 
889 	_dif_sgl_init(&src_sgl, iovs, iovcnt);
890 	_dif_sgl_init(&dst_sgl, bounce_iov, 1);
891 
892 	data_block_size = ctx->block_size - ctx->md_size;
893 
894 	if (!_dif_sgl_is_valid(&src_sgl, data_block_size * num_blocks) ||
895 	    !_dif_sgl_is_valid(&dst_sgl, ctx->block_size * num_blocks)) {
896 		SPDK_ERRLOG("Sizes of iovec arrays are not valid.\n");
897 		return -EINVAL;
898 	}
899 
900 	if (_dif_is_disabled(ctx->dif_type)) {
901 		return 0;
902 	}
903 
904 	if (_dif_sgl_is_bytes_multiple(&src_sgl, data_block_size)) {
905 		dif_generate_copy(&src_sgl, &dst_sgl, num_blocks, ctx);
906 	} else {
907 		dif_generate_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx);
908 	}
909 
910 	return 0;
911 }
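
/* Illustrative sketch (hypothetical values, not part of this file): on a
 * write path, copy four 512-byte data blocks from application iovecs into a
 * 4 * 520-byte bounce buffer while inserting DIF into each block.
 *
 *	uint8_t bounce[4 * 520];
 *	struct iovec bounce_iov = { .iov_base = bounce, .iov_len = sizeof(bounce) };
 *	int rc;
 *
 *	rc = spdk_dif_generate_copy(app_iovs, app_iovcnt, &bounce_iov, 4, &ctx);
 */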
912 
913 static int
914 dif_verify_copy(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
915 		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
916 		struct spdk_dif_error *err_blk)
917 {
918 	uint32_t offset_blocks = 0, data_block_size;
919 	void *src, *dst;
920 	int rc;
921 	uint16_t guard;
922 
923 	data_block_size = ctx->block_size - ctx->md_size;
924 
925 	while (offset_blocks < num_blocks) {
926 		_dif_sgl_get_buf(src_sgl, &src, NULL);
927 		_dif_sgl_get_buf(dst_sgl, &dst, NULL);
928 
929 		guard = 0;
930 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
931 			guard = spdk_crc16_t10dif_copy(ctx->guard_seed, dst, src, data_block_size);
932 			guard = spdk_crc16_t10dif(guard, src + data_block_size,
933 						  ctx->guard_interval - data_block_size);
934 		} else {
935 			memcpy(dst, src, data_block_size);
936 		}
937 
938 		rc = _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
939 		if (rc != 0) {
940 			return rc;
941 		}
942 
943 		_dif_sgl_advance(src_sgl, ctx->block_size);
944 		_dif_sgl_advance(dst_sgl, data_block_size);
945 		offset_blocks++;
946 	}
947 
948 	return 0;
949 }
950 
951 static int
952 _dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
953 		       uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
954 		       struct spdk_dif_error *err_blk)
955 {
956 	uint32_t offset_in_block, dst_len, data_block_size;
957 	uint16_t guard = 0;
958 	void *src, *dst;
959 
960 	_dif_sgl_get_buf(src_sgl, &src, NULL);
961 
962 	data_block_size = ctx->block_size - ctx->md_size;
963 
964 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
965 		guard = ctx->guard_seed;
966 	}
967 	offset_in_block = 0;
968 
969 	while (offset_in_block < data_block_size) {
970 		/* Compute CRC over split logical block data and copy
971 		 * data from the bounce buffer to the application buffers.
972 		 */
973 		_dif_sgl_get_buf(dst_sgl, &dst, &dst_len);
974 		dst_len = spdk_min(dst_len, data_block_size - offset_in_block);
975 
976 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
977 			guard = spdk_crc16_t10dif_copy(guard, dst,
978 						       src + offset_in_block, dst_len);
979 		} else {
980 			memcpy(dst, src + offset_in_block, dst_len);
981 		}
982 
983 		_dif_sgl_advance(dst_sgl, dst_len);
984 		offset_in_block += dst_len;
985 	}
986 
987 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
988 		guard = spdk_crc16_t10dif(guard, src + data_block_size,
989 					  ctx->guard_interval - data_block_size);
990 	}
991 
992 	_dif_sgl_advance(src_sgl, ctx->block_size);
993 
994 	return _dif_verify(src + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
995 }
996 
997 static int
998 dif_verify_copy_split(struct _dif_sgl *src_sgl, struct _dif_sgl *dst_sgl,
999 		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1000 		      struct spdk_dif_error *err_blk)
1001 {
1002 	uint32_t offset_blocks;
1003 	int rc;
1004 
1005 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1006 		rc = _dif_verify_copy_split(src_sgl, dst_sgl, offset_blocks, ctx, err_blk);
1007 		if (rc != 0) {
1008 			return rc;
1009 		}
1010 	}
1011 
1012 	return 0;
1013 }
1014 
1015 int
1016 spdk_dif_verify_copy(struct iovec *iovs, int iovcnt, struct iovec *bounce_iov,
1017 		     uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1018 		     struct spdk_dif_error *err_blk)
1019 {
1020 	struct _dif_sgl src_sgl, dst_sgl;
1021 	uint32_t data_block_size;
1022 
1023 	_dif_sgl_init(&src_sgl, bounce_iov, 1);
1024 	_dif_sgl_init(&dst_sgl, iovs, iovcnt);
1025 
1026 	data_block_size = ctx->block_size - ctx->md_size;
1027 
1028 	if (!_dif_sgl_is_valid(&dst_sgl, data_block_size * num_blocks) ||
1029 	    !_dif_sgl_is_valid(&src_sgl, ctx->block_size * num_blocks)) {
1030 		SPDK_ERRLOG("Sizes of iovec arrays are not valid.\n");
1031 		return -EINVAL;
1032 	}
1033 
1034 	if (_dif_is_disabled(ctx->dif_type)) {
1035 		return 0;
1036 	}
1037 
1038 	if (_dif_sgl_is_bytes_multiple(&dst_sgl, data_block_size)) {
1039 		return dif_verify_copy(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk);
1040 	} else {
1041 		return dif_verify_copy_split(&src_sgl, &dst_sgl, num_blocks, ctx, err_blk);
1042 	}
1043 }
1044 
1045 static void
1046 _bit_flip(uint8_t *buf, uint32_t flip_bit)
1047 {
1048 	uint8_t byte;
1049 
1050 	byte = *buf;
1051 	byte ^= 1 << flip_bit;
1052 	*buf = byte;
1053 }
1054 
1055 static int
1056 _dif_inject_error(struct _dif_sgl *sgl,
1057 		  uint32_t block_size, uint32_t num_blocks,
1058 		  uint32_t inject_offset_blocks,
1059 		  uint32_t inject_offset_bytes,
1060 		  uint32_t inject_offset_bits)
1061 {
1062 	uint32_t offset_in_block, buf_len;
1063 	void *buf;
1064 
1065 	_dif_sgl_advance(sgl, block_size * inject_offset_blocks);
1066 
1067 	offset_in_block = 0;
1068 
1069 	while (offset_in_block < block_size) {
1070 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
1071 		buf_len = spdk_min(buf_len, block_size - offset_in_block);
1072 
1073 		if (inject_offset_bytes >= offset_in_block &&
1074 		    inject_offset_bytes < offset_in_block + buf_len) {
1075 			buf += inject_offset_bytes - offset_in_block;
1076 			_bit_flip(buf, inject_offset_bits);
1077 			return 0;
1078 		}
1079 
1080 		_dif_sgl_advance(sgl, buf_len);
1081 		offset_in_block += buf_len;
1082 	}
1083 
1084 	return -1;
1085 }
1086 
1087 static int
1088 dif_inject_error(struct _dif_sgl *sgl, uint32_t block_size, uint32_t num_blocks,
1089 		 uint32_t start_inject_bytes, uint32_t inject_range_bytes,
1090 		 uint32_t *inject_offset)
1091 {
1092 	uint32_t inject_offset_blocks, inject_offset_bytes, inject_offset_bits;
1093 	uint32_t offset_blocks;
1094 	int rc;
1095 
1096 	srand(time(0));
1097 
1098 	inject_offset_blocks = rand() % num_blocks;
1099 	inject_offset_bytes = start_inject_bytes + (rand() % inject_range_bytes);
1100 	inject_offset_bits = rand() % 8;
1101 
1102 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1103 		if (offset_blocks == inject_offset_blocks) {
1104 			rc = _dif_inject_error(sgl, block_size, num_blocks,
1105 					       inject_offset_blocks,
1106 					       inject_offset_bytes,
1107 					       inject_offset_bits);
1108 			if (rc == 0) {
1109 				*inject_offset = inject_offset_blocks;
1110 			}
1111 			return rc;
1112 		}
1113 	}
1114 
1115 	return -1;
1116 }
1117 
1118 int
1119 spdk_dif_inject_error(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
1120 		      const struct spdk_dif_ctx *ctx, uint32_t inject_flags,
1121 		      uint32_t *inject_offset)
1122 {
1123 	struct _dif_sgl sgl;
1124 	int rc;
1125 
1126 	_dif_sgl_init(&sgl, iovs, iovcnt);
1127 
1128 	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
1129 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
1130 		return -EINVAL;
1131 	}
1132 
1133 	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
1134 		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
1135 				      ctx->guard_interval + offsetof(struct spdk_dif, ref_tag),
1136 				      SPDK_SIZEOF_MEMBER(struct spdk_dif, ref_tag),
1137 				      inject_offset);
1138 		if (rc != 0) {
1139 			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
1140 			return rc;
1141 		}
1142 	}
1143 
1144 	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
1145 		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
1146 				      ctx->guard_interval + offsetof(struct spdk_dif, app_tag),
1147 				      SPDK_SIZEOF_MEMBER(struct spdk_dif, app_tag),
1148 				      inject_offset);
1149 		if (rc != 0) {
1150 			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
1151 			return rc;
1152 		}
1153 	}
1154 	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
1155 		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
1156 				      ctx->guard_interval,
1157 				      SPDK_SIZEOF_MEMBER(struct spdk_dif, guard),
1158 				      inject_offset);
1159 		if (rc != 0) {
1160 			SPDK_ERRLOG("Failed to inject error to Guard.\n");
1161 			return rc;
1162 		}
1163 	}
1164 
1165 	if (inject_flags & SPDK_DIF_DATA_ERROR) {
1166 		/* If the DIF information is contained within the last 8 bytes of
1167 		 * metadata, then the CRC covers all metadata bytes up to but excluding
1168 		 * the last 8 bytes. However, error injection does not cover this metadata
1169 		 * region because its classification has not been determined yet.
1170 		 *
1171 		 * Note: Error injection to data block is expected to be detected as
1172 		 * guard error.
1173 		 */
1174 		rc = dif_inject_error(&sgl, ctx->block_size, num_blocks,
1175 				      0,
1176 				      ctx->block_size - ctx->md_size,
1177 				      inject_offset);
1178 		if (rc != 0) {
1179 			SPDK_ERRLOG("Failed to inject error to data block.\n");
1180 			return rc;
1181 		}
1182 	}
1183 
1184 	return 0;
1185 }
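
/* Illustrative test sketch (hypothetical values, not part of this file): flip
 * one bit of the Guard field in a randomly chosen block, then confirm that
 * verification reports a guard error for exactly that block.
 *
 *	uint32_t inject_offset = 0;
 *	struct spdk_dif_error err_blk;
 *	int rc;
 *
 *	rc = spdk_dif_inject_error(&iov, 1, 4, &ctx, SPDK_DIF_GUARD_ERROR,
 *				   &inject_offset);
 *	if (rc == 0) {
 *		rc = spdk_dif_verify(&iov, 1, 4, &ctx, &err_blk);
 *		assert(rc != 0 && err_blk.err_type == SPDK_DIF_GUARD_ERROR &&
 *		       err_blk.err_offset == inject_offset);
 *	}
 */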
1186 
1187 static void
1188 dix_generate(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1189 	     uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
1190 {
1191 	uint32_t offset_blocks = 0;
1192 	uint16_t guard;
1193 	void *data_buf, *md_buf;
1194 
1195 	while (offset_blocks < num_blocks) {
1196 		_dif_sgl_get_buf(data_sgl, &data_buf, NULL);
1197 		_dif_sgl_get_buf(md_sgl, &md_buf, NULL);
1198 
1199 		guard = 0;
1200 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1201 			guard = spdk_crc16_t10dif(ctx->guard_seed, data_buf, ctx->block_size);
1202 			guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
1203 		}
1204 
1205 		_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);
1206 
1207 		_dif_sgl_advance(data_sgl, ctx->block_size);
1208 		_dif_sgl_advance(md_sgl, ctx->md_size);
1209 		offset_blocks++;
1210 	}
1211 }
1212 
1213 static void
1214 _dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1215 		    uint32_t offset_blocks, const struct spdk_dif_ctx *ctx)
1216 {
1217 	uint32_t offset_in_block, data_buf_len;
1218 	uint16_t guard = 0;
1219 	void *data_buf, *md_buf;
1220 
1221 	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);
1222 
1223 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1224 		guard = ctx->guard_seed;
1225 	}
1226 	offset_in_block = 0;
1227 
1228 	while (offset_in_block < ctx->block_size) {
1229 		_dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len);
1230 		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);
1231 
1232 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1233 			guard = spdk_crc16_t10dif(guard, data_buf, data_buf_len);
1234 		}
1235 
1236 		_dif_sgl_advance(data_sgl, data_buf_len);
1237 		offset_in_block += data_buf_len;
1238 	}
1239 
1240 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1241 		guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
1242 	}
1243 
1244 	_dif_sgl_advance(md_sgl, ctx->md_size);
1245 
1246 	_dif_generate(md_buf + ctx->guard_interval, guard, offset_blocks, ctx);
1247 }
1248 
1249 static void
1250 dix_generate_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1251 		   uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
1252 {
1253 	uint32_t offset_blocks;
1254 
1255 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1256 		_dix_generate_split(data_sgl, md_sgl, offset_blocks, ctx);
1257 	}
1258 }
1259 
1260 int
1261 spdk_dix_generate(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
1262 		  uint32_t num_blocks, const struct spdk_dif_ctx *ctx)
1263 {
1264 	struct _dif_sgl data_sgl, md_sgl;
1265 
1266 	_dif_sgl_init(&data_sgl, iovs, iovcnt);
1267 	_dif_sgl_init(&md_sgl, md_iov, 1);
1268 
1269 	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
1270 	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
1271 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
1272 		return -EINVAL;
1273 	}
1274 
1275 	if (_dif_is_disabled(ctx->dif_type)) {
1276 		return 0;
1277 	}
1278 
1279 	if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) {
1280 		dix_generate(&data_sgl, &md_sgl, num_blocks, ctx);
1281 	} else {
1282 		dix_generate_split(&data_sgl, &md_sgl, num_blocks, ctx);
1283 	}
1284 
1285 	return 0;
1286 }
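
/* Illustrative DIX sketch (hypothetical values, not part of this file): with
 * separate metadata (ctx initialized with block_size = 512, md_size = 8 and
 * md_interleave = false), data and metadata live in different buffers.
 *
 *	uint8_t md[4 * 8];
 *	struct iovec md_iov = { .iov_base = md, .iov_len = sizeof(md) };
 *	int rc;
 *
 *	rc = spdk_dix_generate(data_iovs, data_iovcnt, &md_iov, 4, &ctx);
 */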
1287 
1288 static int
1289 dix_verify(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1290 	   uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1291 	   struct spdk_dif_error *err_blk)
1292 {
1293 	uint32_t offset_blocks = 0;
1294 	uint16_t guard;
1295 	void *data_buf, *md_buf;
1296 	int rc;
1297 
1298 	while (offset_blocks < num_blocks) {
1299 		_dif_sgl_get_buf(data_sgl, &data_buf, NULL);
1300 		_dif_sgl_get_buf(md_sgl, &md_buf, NULL);
1301 
1302 		guard = 0;
1303 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1304 			guard = spdk_crc16_t10dif(ctx->guard_seed, data_buf, ctx->block_size);
1305 			guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
1306 		}
1307 
1308 		rc = _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
1309 		if (rc != 0) {
1310 			return rc;
1311 		}
1312 
1313 		_dif_sgl_advance(data_sgl, ctx->block_size);
1314 		_dif_sgl_advance(md_sgl, ctx->md_size);
1315 		offset_blocks++;
1316 	}
1317 
1318 	return 0;
1319 }
1320 
1321 static int
1322 _dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1323 		  uint32_t offset_blocks, const struct spdk_dif_ctx *ctx,
1324 		  struct spdk_dif_error *err_blk)
1325 {
1326 	uint32_t offset_in_block, data_buf_len;
1327 	uint16_t guard = 0;
1328 	void *data_buf, *md_buf;
1329 
1330 	_dif_sgl_get_buf(md_sgl, &md_buf, NULL);
1331 
1332 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1333 		guard = ctx->guard_seed;
1334 	}
1335 	offset_in_block = 0;
1336 
1337 	while (offset_in_block < ctx->block_size) {
1338 		_dif_sgl_get_buf(data_sgl, &data_buf, &data_buf_len);
1339 		data_buf_len = spdk_min(data_buf_len, ctx->block_size - offset_in_block);
1340 
1341 		if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1342 			guard = spdk_crc16_t10dif(guard, data_buf, data_buf_len);
1343 		}
1344 
1345 		_dif_sgl_advance(data_sgl, data_buf_len);
1346 		offset_in_block += data_buf_len;
1347 	}
1348 
1349 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1350 		guard = spdk_crc16_t10dif(guard, md_buf, ctx->guard_interval);
1351 	}
1352 
1353 	_dif_sgl_advance(md_sgl, ctx->md_size);
1354 
1355 	return _dif_verify(md_buf + ctx->guard_interval, guard, offset_blocks, ctx, err_blk);
1356 }
1357 
1358 static int
1359 dix_verify_split(struct _dif_sgl *data_sgl, struct _dif_sgl *md_sgl,
1360 		 uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1361 		 struct spdk_dif_error *err_blk)
1362 {
1363 	uint32_t offset_blocks;
1364 	int rc;
1365 
1366 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1367 		rc = _dix_verify_split(data_sgl, md_sgl, offset_blocks, ctx, err_blk);
1368 		if (rc != 0) {
1369 			return rc;
1370 		}
1371 	}
1372 
1373 	return 0;
1374 }
1375 
1376 int
1377 spdk_dix_verify(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
1378 		uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1379 		struct spdk_dif_error *err_blk)
1380 {
1381 	struct _dif_sgl data_sgl, md_sgl;
1382 
1383 	if (md_iov->iov_base == NULL) {
1384 		SPDK_ERRLOG("Metadata buffer is NULL.\n");
1385 		return -EINVAL;
1386 	}
1387 
1388 	_dif_sgl_init(&data_sgl, iovs, iovcnt);
1389 	_dif_sgl_init(&md_sgl, md_iov, 1);
1390 
1391 	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
1392 	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
1393 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
1394 		return -EINVAL;
1395 	}
1396 
1397 	if (_dif_is_disabled(ctx->dif_type)) {
1398 		return 0;
1399 	}
1400 
1401 	if (_dif_sgl_is_bytes_multiple(&data_sgl, ctx->block_size)) {
1402 		return dix_verify(&data_sgl, &md_sgl, num_blocks, ctx, err_blk);
1403 	} else {
1404 		return dix_verify_split(&data_sgl, &md_sgl, num_blocks, ctx, err_blk);
1405 	}
1406 }
1407 
1408 int
1409 spdk_dix_inject_error(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
1410 		      uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1411 		      uint32_t inject_flags, uint32_t *inject_offset)
1412 {
1413 	struct _dif_sgl data_sgl, md_sgl;
1414 	int rc;
1415 
1416 	_dif_sgl_init(&data_sgl, iovs, iovcnt);
1417 	_dif_sgl_init(&md_sgl, md_iov, 1);
1418 
1419 	if (!_dif_sgl_is_valid(&data_sgl, ctx->block_size * num_blocks) ||
1420 	    !_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
1421 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
1422 		return -EINVAL;
1423 	}
1424 
1425 	if (inject_flags & SPDK_DIF_REFTAG_ERROR) {
1426 		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
1427 				      ctx->guard_interval + offsetof(struct spdk_dif, ref_tag),
1428 				      SPDK_SIZEOF_MEMBER(struct spdk_dif, ref_tag),
1429 				      inject_offset);
1430 		if (rc != 0) {
1431 			SPDK_ERRLOG("Failed to inject error to Reference Tag.\n");
1432 			return rc;
1433 		}
1434 	}
1435 
1436 	if (inject_flags & SPDK_DIF_APPTAG_ERROR) {
1437 		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
1438 				      ctx->guard_interval + offsetof(struct spdk_dif, app_tag),
1439 				      SPDK_SIZEOF_MEMBER(struct spdk_dif, app_tag),
1440 				      inject_offset);
1441 		if (rc != 0) {
1442 			SPDK_ERRLOG("Failed to inject error to Application Tag.\n");
1443 			return rc;
1444 		}
1445 	}
1446 
1447 	if (inject_flags & SPDK_DIF_GUARD_ERROR) {
1448 		rc = dif_inject_error(&md_sgl, ctx->md_size, num_blocks,
1449 				      ctx->guard_interval,
1450 				      SPDK_SIZEOF_MEMBER(struct spdk_dif, guard),
1451 				      inject_offset);
1452 		if (rc != 0) {
1453 			SPDK_ERRLOG("Failed to inject error to Guard.\n");
1454 			return rc;
1455 		}
1456 	}
1457 
1458 	if (inject_flags & SPDK_DIF_DATA_ERROR) {
1459 		/* Note: Error injection to data block is expected to be detected
1460 		 * as guard error.
1461 		 */
1462 		rc = dif_inject_error(&data_sgl, ctx->block_size, num_blocks,
1463 				      0,
1464 				      ctx->block_size,
1465 				      inject_offset);
1466 		if (rc != 0) {
1467 			SPDK_ERRLOG("Failed to inject error to data block.\n");
1468 			return rc;
1469 		}
1470 	}
1471 
1472 	return 0;
1473 }
1474 
1475 static uint32_t
1476 _to_next_boundary(uint32_t offset, uint32_t boundary)
1477 {
1478 	return boundary - (offset % boundary);
1479 }
1480 
1481 static uint32_t
1482 _to_size_with_md(uint32_t size, uint32_t data_block_size, uint32_t block_size)
1483 {
1484 	return (size / data_block_size) * block_size + (size % data_block_size);
1485 }
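
/* Worked example (hypothetical values): for a 512 + 8 interleaved format
 * (data_block_size = 512, block_size = 520), a 1000-byte data range expands to
 *
 *   _to_size_with_md(1000, 512, 520) = (1000 / 512) * 520 + (1000 % 512)
 *                                    = 1 * 520 + 488 = 1008
 *
 * bytes of metadata-interleaved buffer, i.e. one complete 520-byte block plus
 * the first 488 data bytes of the next block. Likewise,
 * _to_next_boundary(488, 512) = 24 is the number of data bytes left before
 * the next block boundary.
 */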
1486 
1487 int
1488 spdk_dif_set_md_interleave_iovs(struct iovec *iovs, int iovcnt,
1489 				struct iovec *buf_iovs, int buf_iovcnt,
1490 				uint32_t data_offset, uint32_t data_len,
1491 				uint32_t *_mapped_len,
1492 				const struct spdk_dif_ctx *ctx)
1493 {
1494 	uint32_t data_block_size, data_unalign, buf_len, buf_offset, len;
1495 	struct _dif_sgl dif_sgl;
1496 	struct _dif_sgl buf_sgl;
1497 
1498 	if (iovs == NULL || iovcnt == 0 || buf_iovs == NULL || buf_iovcnt == 0) {
1499 		return -EINVAL;
1500 	}
1501 
1502 	data_block_size = ctx->block_size - ctx->md_size;
1503 
1504 	data_unalign = ctx->data_offset % data_block_size;
1505 
1506 	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
1507 				   ctx->block_size);
1508 	buf_len -= data_unalign;
1509 
1510 	_dif_sgl_init(&dif_sgl, iovs, iovcnt);
1511 	_dif_sgl_init(&buf_sgl, buf_iovs, buf_iovcnt);
1512 
1513 	if (!_dif_sgl_is_valid(&buf_sgl, buf_len)) {
1514 		SPDK_ERRLOG("Buffer overflow will occur.\n");
1515 		return -ERANGE;
1516 	}
1517 
1518 	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
1519 	buf_offset -= data_unalign;
1520 
1521 	_dif_sgl_advance(&buf_sgl, buf_offset);
1522 
1523 	while (data_len != 0) {
1524 		len = spdk_min(data_len, _to_next_boundary(ctx->data_offset + data_offset, data_block_size));
1525 		if (!_dif_sgl_append_split(&dif_sgl, &buf_sgl, len)) {
1526 			break;
1527 		}
1528 		_dif_sgl_advance(&buf_sgl, ctx->md_size);
1529 		data_offset += len;
1530 		data_len -= len;
1531 	}
1532 
1533 	if (_mapped_len != NULL) {
1534 		*_mapped_len = dif_sgl.total_size;
1535 	}
1536 
1537 	return iovcnt - dif_sgl.iovcnt;
1538 }
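
/* Illustrative example (hypothetical values, not part of this file): for the
 * 512 + 8 format above, mapping data_offset = 0 and data_len = 1024 of a
 * single contiguous metadata-interleaved buffer produces two iovec entries of
 * 512 bytes each, pointing at the data portions of the first two 520-byte
 * blocks and skipping the 8-byte metadata between them; *_mapped_len is set
 * to 1024 and the return value is 2 (given iovcnt >= 2).
 */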
1539 
1540 static int
1541 _dif_sgl_setup_stream(struct _dif_sgl *sgl, uint32_t *_buf_offset, uint32_t *_buf_len,
1542 		      uint32_t data_offset, uint32_t data_len,
1543 		      const struct spdk_dif_ctx *ctx)
1544 {
1545 	uint32_t data_block_size, data_unalign, buf_len, buf_offset;
1546 
1547 	data_block_size = ctx->block_size - ctx->md_size;
1548 
1549 	data_unalign = ctx->data_offset % data_block_size;
1550 
1551 	/* If the last data block is complete, the DIF of that block is
1552 	 * inserted or verified in this pass.
1553 	 */
1554 	buf_len = _to_size_with_md(data_unalign + data_offset + data_len, data_block_size,
1555 				   ctx->block_size);
1556 	buf_len -= data_unalign;
1557 
1558 	if (!_dif_sgl_is_valid(sgl, buf_len)) {
1559 		return -ERANGE;
1560 	}
1561 
1562 	buf_offset = _to_size_with_md(data_unalign + data_offset, data_block_size, ctx->block_size);
1563 	buf_offset -= data_unalign;
1564 
1565 	_dif_sgl_advance(sgl, buf_offset);
1566 	buf_len -= buf_offset;
1567 
1568 	buf_offset += data_unalign;
1569 
1570 	*_buf_offset = buf_offset;
1571 	*_buf_len = buf_len;
1572 
1573 	return 0;
1574 }
1575 
1576 int
1577 spdk_dif_generate_stream(struct iovec *iovs, int iovcnt,
1578 			 uint32_t data_offset, uint32_t data_len,
1579 			 struct spdk_dif_ctx *ctx)
1580 {
1581 	uint32_t buf_len = 0, buf_offset = 0;
1582 	uint32_t len, offset_in_block, offset_blocks;
1583 	uint16_t guard = 0;
1584 	struct _dif_sgl sgl;
1585 	int rc;
1586 
1587 	if (iovs == NULL || iovcnt == 0) {
1588 		return -EINVAL;
1589 	}
1590 
1591 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1592 		guard = ctx->last_guard;
1593 	}
1594 
1595 	_dif_sgl_init(&sgl, iovs, iovcnt);
1596 
1597 	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
1598 	if (rc != 0) {
1599 		return rc;
1600 	}
1601 
1602 	while (buf_len != 0) {
1603 		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
1604 		offset_in_block = buf_offset % ctx->block_size;
1605 		offset_blocks = buf_offset / ctx->block_size;
1606 
1607 		guard = _dif_generate_split(&sgl, offset_in_block, len, guard, offset_blocks, ctx);
1608 
1609 		buf_len -= len;
1610 		buf_offset += len;
1611 	}
1612 
1613 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1614 		ctx->last_guard = guard;
1615 	}
1616 
1617 	return 0;
1618 }
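
/* Illustrative streaming sketch (hypothetical values, not part of this file):
 * DIF can be generated incrementally as data arrives, e.g. 100 bytes at a
 * time, because the partially computed guard is carried in ctx->last_guard
 * between calls.
 *
 *	uint32_t offset = 0, total = 4 * 512, chunk = 100, len;
 *	int rc = 0;
 *
 *	while (rc == 0 && offset < total) {
 *		len = spdk_min(chunk, total - offset);
 *		rc = spdk_dif_generate_stream(iovs, iovcnt, offset, len, &ctx);
 *		offset += len;
 *	}
 */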
1619 
1620 int
1621 spdk_dif_verify_stream(struct iovec *iovs, int iovcnt,
1622 		       uint32_t data_offset, uint32_t data_len,
1623 		       struct spdk_dif_ctx *ctx,
1624 		       struct spdk_dif_error *err_blk)
1625 {
1626 	uint32_t buf_len = 0, buf_offset = 0;
1627 	uint32_t len, offset_in_block, offset_blocks;
1628 	uint16_t guard = 0;
1629 	struct _dif_sgl sgl;
1630 	int rc = 0;
1631 
1632 	if (iovs == NULL || iovcnt == 0) {
1633 		return -EINVAL;
1634 	}
1635 
1636 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1637 		guard = ctx->last_guard;
1638 	}
1639 
1640 	_dif_sgl_init(&sgl, iovs, iovcnt);
1641 
1642 	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
1643 	if (rc != 0) {
1644 		return rc;
1645 	}
1646 
1647 	while (buf_len != 0) {
1648 		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
1649 		offset_in_block = buf_offset % ctx->block_size;
1650 		offset_blocks = buf_offset / ctx->block_size;
1651 
1652 		rc = _dif_verify_split(&sgl, offset_in_block, len, &guard, offset_blocks,
1653 				       ctx, err_blk);
1654 		if (rc != 0) {
1655 			goto error;
1656 		}
1657 
1658 		buf_len -= len;
1659 		buf_offset += len;
1660 	}
1661 
1662 	if (ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK) {
1663 		ctx->last_guard = guard;
1664 	}
1665 error:
1666 	return rc;
1667 }
1668 
1669 int
1670 spdk_dif_update_crc32c_stream(struct iovec *iovs, int iovcnt,
1671 			      uint32_t data_offset, uint32_t data_len,
1672 			      uint32_t *_crc32c, const struct spdk_dif_ctx *ctx)
1673 {
1674 	uint32_t buf_len = 0, buf_offset = 0, len, offset_in_block;
1675 	uint32_t crc32c;
1676 	struct _dif_sgl sgl;
1677 	int rc;
1678 
1679 	if (iovs == NULL || iovcnt == 0) {
1680 		return -EINVAL;
1681 	}
1682 
1683 	crc32c = *_crc32c;
1684 	_dif_sgl_init(&sgl, iovs, iovcnt);
1685 
1686 	rc = _dif_sgl_setup_stream(&sgl, &buf_offset, &buf_len, data_offset, data_len, ctx);
1687 	if (rc != 0) {
1688 		return rc;
1689 	}
1690 
1691 	while (buf_len != 0) {
1692 		len = spdk_min(buf_len, _to_next_boundary(buf_offset, ctx->block_size));
1693 		offset_in_block = buf_offset % ctx->block_size;
1694 
1695 		crc32c = _dif_update_crc32c_split(&sgl, offset_in_block, len, crc32c, ctx);
1696 
1697 		buf_len -= len;
1698 		buf_offset += len;
1699 	}
1700 
1701 	*_crc32c = crc32c;
1702 
1703 	return 0;
1704 }
1705 
1706 void
1707 spdk_dif_get_range_with_md(uint32_t data_offset, uint32_t data_len,
1708 			   uint32_t *_buf_offset, uint32_t *_buf_len,
1709 			   const struct spdk_dif_ctx *ctx)
1710 {
1711 	uint32_t data_block_size, data_unalign, buf_offset, buf_len;
1712 
1713 	if (!ctx->md_interleave) {
1714 		buf_offset = data_offset;
1715 		buf_len = data_len;
1716 	} else {
1717 		data_block_size = ctx->block_size - ctx->md_size;
1718 
1719 		data_unalign = data_offset % data_block_size;
1720 
1721 		buf_offset = _to_size_with_md(data_offset, data_block_size, ctx->block_size);
1722 		buf_len = _to_size_with_md(data_unalign + data_len, data_block_size, ctx->block_size) -
1723 			  data_unalign;
1724 	}
1725 
1726 	if (_buf_offset != NULL) {
1727 		*_buf_offset = buf_offset;
1728 	}
1729 
1730 	if (_buf_len != NULL) {
1731 		*_buf_len = buf_len;
1732 	}
1733 }
1734 
1735 uint32_t
1736 spdk_dif_get_length_with_md(uint32_t data_len, const struct spdk_dif_ctx *ctx)
1737 {
1738 	uint32_t data_block_size;
1739 
1740 	if (!ctx->md_interleave) {
1741 		return data_len;
1742 	} else {
1743 		data_block_size = ctx->block_size - ctx->md_size;
1744 
1745 		return _to_size_with_md(data_len, data_block_size, ctx->block_size);
1746 	}
1747 }
1748 
1749 static int
1750 _dif_remap_ref_tag(struct _dif_sgl *sgl, uint32_t offset_blocks,
1751 		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
1752 {
1753 	uint32_t offset, buf_len, expected = 0, _actual, remapped;
1754 	void *buf;
1755 	struct _dif_sgl tmp_sgl;
1756 	struct spdk_dif dif;
1757 
1758 	/* Fast forward to DIF field. */
1759 	_dif_sgl_advance(sgl, ctx->guard_interval);
1760 	_dif_sgl_copy(&tmp_sgl, sgl);
1761 
1762 	/* Copy the split DIF field to the temporary DIF buffer */
1763 	offset = 0;
1764 	while (offset < sizeof(struct spdk_dif)) {
1765 		_dif_sgl_get_buf(sgl, &buf, &buf_len);
1766 		buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset);
1767 
1768 		memcpy((uint8_t *)&dif + offset, buf, buf_len);
1769 
1770 		_dif_sgl_advance(sgl, buf_len);
1771 		offset += buf_len;
1772 	}
1773 
1774 	switch (ctx->dif_type) {
1775 	case SPDK_DIF_TYPE1:
1776 	case SPDK_DIF_TYPE2:
1777 		/* If Type 1 or 2 is used, then all DIF checks are disabled when
1778 		 * the Application Tag is 0xFFFF.
1779 		 */
1780 		if (dif.app_tag == 0xFFFF) {
1781 			goto end;
1782 		}
1783 		break;
1784 	case SPDK_DIF_TYPE3:
1785 		/* If Type 3 is used, then all DIF checks are disabled when the
1786 		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF.
1787 		 */
1788 		if (dif.app_tag == 0xFFFF && dif.ref_tag == 0xFFFFFFFF) {
1789 			goto end;
1790 		}
1791 		break;
1792 	default:
1793 		break;
1794 	}
1795 
1796 	/* For type 1 and 2, the Reference Tag is incremented for each
1797 	 * subsequent logical block. For type 3, the Reference Tag
1798 	 * remains the same as the initial Reference Tag.
1799 	 */
1800 	if (ctx->dif_type != SPDK_DIF_TYPE3) {
1801 		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
1802 		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
1803 	} else {
1804 		remapped = ctx->remapped_init_ref_tag;
1805 	}
1806 
1807 	/* Verify the stored Reference Tag. */
1808 	switch (ctx->dif_type) {
1809 	case SPDK_DIF_TYPE1:
1810 	case SPDK_DIF_TYPE2:
1811 		/* Compare the DIF Reference Tag field to the computed Reference Tag.
1812 		 * The computed Reference Tag will be the least significant 4 bytes
1813 		 * of the LBA when Type 1 is used, and application specific value
1814 		 * if Type 2 is used.
1815 		 */
1816 		_actual = from_be32(&dif.ref_tag);
1817 		if (_actual != expected) {
1818 			_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected,
1819 				       _actual, offset_blocks);
1820 			SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu32 "," \
1821 				    " Expected=%x, Actual=%x\n",
1822 				    expected, expected, _actual);
1823 			return -1;
1824 		}
1825 		break;
1826 	case SPDK_DIF_TYPE3:
1827 		/* For type 3, the computed Reference Tag remains unchanged.
1828 		 * Hence ignore the Reference Tag field.
1829 		 */
1830 		break;
1831 	default:
1832 		break;
1833 	}
1834 
1835 	/* Update the stored Reference Tag to the remapped one. */
1836 	to_be32(&dif.ref_tag, remapped);
1837 
1838 	offset = 0;
1839 	while (offset < sizeof(struct spdk_dif)) {
1840 		_dif_sgl_get_buf(&tmp_sgl, &buf, &buf_len);
1841 		buf_len = spdk_min(buf_len, sizeof(struct spdk_dif) - offset);
1842 
1843 		memcpy(buf, (uint8_t *)&dif + offset, buf_len);
1844 
1845 		_dif_sgl_advance(&tmp_sgl, buf_len);
1846 		offset += buf_len;
1847 	}
1848 
1849 end:
1850 	_dif_sgl_advance(sgl, ctx->block_size - ctx->guard_interval - sizeof(struct spdk_dif));
1851 
1852 	return 0;
1853 }
1854 
1855 int
1856 spdk_dif_remap_ref_tag(struct iovec *iovs, int iovcnt, uint32_t num_blocks,
1857 		       const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
1858 {
1859 	struct _dif_sgl sgl;
1860 	uint32_t offset_blocks;
1861 	int rc;
1862 
1863 	_dif_sgl_init(&sgl, iovs, iovcnt);
1864 
1865 	if (!_dif_sgl_is_valid(&sgl, ctx->block_size * num_blocks)) {
1866 		SPDK_ERRLOG("Size of iovec array is not valid.\n");
1867 		return -EINVAL;
1868 	}
1869 
1870 	if (_dif_is_disabled(ctx->dif_type)) {
1871 		return 0;
1872 	}
1873 
1874 	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
1875 		return 0;
1876 	}
1877 
1878 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1879 		rc = _dif_remap_ref_tag(&sgl, offset_blocks, ctx, err_blk);
1880 		if (rc != 0) {
1881 			return rc;
1882 		}
1883 	}
1884 
1885 	return 0;
1886 }
1887 
1888 static int
1889 _dix_remap_ref_tag(struct _dif_sgl *md_sgl, uint32_t offset_blocks,
1890 		   const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err_blk)
1891 {
1892 	uint32_t expected = 0, _actual, remapped;
1893 	uint8_t *md_buf;
1894 	struct spdk_dif *dif;
1895 
1896 	_dif_sgl_get_buf(md_sgl, (void *)&md_buf, NULL);
1897 
1898 	dif = (struct spdk_dif *)(md_buf + ctx->guard_interval);
1899 
1900 	switch (ctx->dif_type) {
1901 	case SPDK_DIF_TYPE1:
1902 	case SPDK_DIF_TYPE2:
1903 		/* If Type 1 or 2 is used, then all DIF checks are disabled when
1904 		 * the Application Tag is 0xFFFF.
1905 		 */
1906 		if (dif->app_tag == 0xFFFF) {
1907 			goto end;
1908 		}
1909 		break;
1910 	case SPDK_DIF_TYPE3:
1911 		/* If Type 3 is used, then all DIF checks are disabled when the
1912 		 * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF.
1913 		 */
1914 		if (dif->app_tag == 0xFFFF && dif->ref_tag == 0xFFFFFFFF) {
1915 			goto end;
1916 		}
1917 		break;
1918 	default:
1919 		break;
1920 	}
1921 
1922 	/* For type 1 and 2, the Reference Tag is incremented for each
1923 	 * subsequent logical block. For type 3, the Reference Tag
1924 	 * remains the same as the initial Reference Tag.
1925 	 */
1926 	if (ctx->dif_type != SPDK_DIF_TYPE3) {
1927 		expected = ctx->init_ref_tag + ctx->ref_tag_offset + offset_blocks;
1928 		remapped = ctx->remapped_init_ref_tag + ctx->ref_tag_offset + offset_blocks;
1929 	} else {
1930 		remapped = ctx->remapped_init_ref_tag;
1931 	}
1932 
1933 	/* Verify the stored Reference Tag. */
1934 	switch (ctx->dif_type) {
1935 	case SPDK_DIF_TYPE1:
1936 	case SPDK_DIF_TYPE2:
1937 		/* Compare the DIF Reference Tag field to the computed Reference Tag.
1938 		 * The computed Reference Tag will be the least significant 4 bytes
1939 		 * of the LBA when Type 1 is used, and application specific value
1940 		 * if Type 2 is used.
1941 		 */
1942 		_actual = from_be32(&dif->ref_tag);
1943 		if (_actual != expected) {
1944 			_dif_error_set(err_blk, SPDK_DIF_REFTAG_ERROR, expected,
1945 				       _actual, offset_blocks);
1946 			SPDK_ERRLOG("Failed to compare Ref Tag: LBA=%" PRIu32 "," \
1947 				    " Expected=%x, Actual=%x\n",
1948 				    expected, expected, _actual);
1949 			return -1;
1950 		}
1951 		break;
1952 	case SPDK_DIF_TYPE3:
1953 		/* For type 3, the computed Reference Tag remains unchanged.
1954 		 * Hence ignore the Reference Tag field.
1955 		 */
1956 		break;
1957 	default:
1958 		break;
1959 	}
1960 
1961 	/* Update the stored Reference Tag to the remapped one. */
1962 	to_be32(&dif->ref_tag, remapped);
1963 
1964 end:
1965 	_dif_sgl_advance(md_sgl, ctx->md_size);
1966 
1967 	return 0;
1968 }
1969 
1970 int
1971 spdk_dix_remap_ref_tag(struct iovec *md_iov, uint32_t num_blocks,
1972 		       const struct spdk_dif_ctx *ctx,
1973 		       struct spdk_dif_error *err_blk)
1974 {
1975 	struct _dif_sgl md_sgl;
1976 	uint32_t offset_blocks;
1977 	int rc;
1978 
1979 	_dif_sgl_init(&md_sgl, md_iov, 1);
1980 
1981 	if (!_dif_sgl_is_valid(&md_sgl, ctx->md_size * num_blocks)) {
1982 		SPDK_ERRLOG("Size of metadata iovec array is not valid.\n");
1983 		return -EINVAL;
1984 	}
1985 
1986 	if (_dif_is_disabled(ctx->dif_type)) {
1987 		return 0;
1988 	}
1989 
1990 	if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
1991 		return 0;
1992 	}
1993 
1994 	for (offset_blocks = 0; offset_blocks < num_blocks; offset_blocks++) {
1995 		rc = _dix_remap_ref_tag(&md_sgl, offset_blocks, ctx, err_blk);
1996 		if (rc != 0) {
1997 			return rc;
1998 		}
1999 	}
2000 
2001 	return 0;
2002 }
2003