/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef SPDK_INTERNAL_NVME_TCP_H
#define SPDK_INTERNAL_NVME_TCP_H

#include "spdk/likely.h"
#include "spdk/sock.h"
#include "spdk/dif.h"

#define SPDK_CRC32C_XOR				0xffffffffUL
#define SPDK_NVME_TCP_DIGEST_LEN		4
#define SPDK_NVME_TCP_DIGEST_ALIGNMENT		4
#define SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT	30
#define SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR	8

/*
 * Maximum number of SGL elements.
 */
#define NVME_TCP_MAX_SGL_DESCRIPTORS	(16)

#define MAKE_DIGEST_WORD(BUF, CRC32C) \
        (   ((*((uint8_t *)(BUF)+0)) = (uint8_t)((uint32_t)(CRC32C) >> 0)), \
            ((*((uint8_t *)(BUF)+1)) = (uint8_t)((uint32_t)(CRC32C) >> 8)), \
            ((*((uint8_t *)(BUF)+2)) = (uint8_t)((uint32_t)(CRC32C) >> 16)), \
            ((*((uint8_t *)(BUF)+3)) = (uint8_t)((uint32_t)(CRC32C) >> 24)))

#define MATCH_DIGEST_WORD(BUF, CRC32C) \
        (    ((((uint32_t) *((uint8_t *)(BUF)+0)) << 0)         \
            | (((uint32_t) *((uint8_t *)(BUF)+1)) << 8)         \
            | (((uint32_t) *((uint8_t *)(BUF)+2)) << 16)        \
            | (((uint32_t) *((uint8_t *)(BUF)+3)) << 24))       \
            == (CRC32C))

#define DGET32(B)                                                               \
        (((  (uint32_t) *((uint8_t *)(B)+0)) << 0)                              \
         | (((uint32_t) *((uint8_t *)(B)+1)) << 8)                              \
         | (((uint32_t) *((uint8_t *)(B)+2)) << 16)                             \
         | (((uint32_t) *((uint8_t *)(B)+3)) << 24))

#define DSET32(B,D)                                                             \
        (((*((uint8_t *)(B)+0)) = (uint8_t)((uint32_t)(D) >> 0)),               \
         ((*((uint8_t *)(B)+1)) = (uint8_t)((uint32_t)(D) >> 8)),               \
         ((*((uint8_t *)(B)+2)) = (uint8_t)((uint32_t)(D) >> 16)),              \
         ((*((uint8_t *)(B)+3)) = (uint8_t)((uint32_t)(D) >> 24)))

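/*
 * Illustrative sketch, not part of the original header: DSET32()/DGET32()
 * store and load a 32-bit value in little-endian byte order regardless of
 * host endianness, e.g. when staging a value into a raw byte buffer. The
 * helper name below is hypothetical.
 */
static inline uint32_t
_nvme_tcp_example_le32_roundtrip(uint8_t buf[4], uint32_t value)
{
	DSET32(buf, value);

	return DGET32(buf);	/* always equal to value */
}
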
typedef void (*nvme_tcp_qpair_xfer_complete_cb)(void *cb_arg);

struct _nvme_tcp_sgl {
	struct iovec	*iov;
	int		iovcnt;
	uint32_t	iov_offset;
	uint32_t	total_size;
};

struct nvme_tcp_pdu {
	union {
		/* Sized to hold a terminate request PDU, including its error data */
		uint8_t					raw[SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE];
		struct spdk_nvme_tcp_common_pdu_hdr	common;
		struct spdk_nvme_tcp_ic_req		ic_req;
		struct spdk_nvme_tcp_term_req_hdr	term_req;
		struct spdk_nvme_tcp_cmd		capsule_cmd;
		struct spdk_nvme_tcp_h2c_data_hdr	h2c_data;
		struct spdk_nvme_tcp_ic_resp		ic_resp;
		struct spdk_nvme_tcp_rsp		capsule_resp;
		struct spdk_nvme_tcp_c2h_data_hdr	c2h_data;
		struct spdk_nvme_tcp_r2t_hdr		r2t;

	} hdr;

	bool						has_hdgst;
	bool						ddgst_enable;
	uint8_t						data_digest[SPDK_NVME_TCP_DIGEST_LEN];

	uint8_t						ch_valid_bytes;
	uint8_t						psh_valid_bytes;
	uint8_t						psh_len;

	nvme_tcp_qpair_xfer_complete_cb			cb_fn;
	void						*cb_arg;

	/* The sock request ends with a 0-length iovec. Place the actual iovecs
	 * immediately after it. The static assert below checks that the compiler
	 * did not insert any unwanted padding. */
	struct spdk_sock_request			sock_req;
	struct iovec					iov[NVME_TCP_MAX_SGL_DESCRIPTORS * 2];

	struct iovec					data_iov[NVME_TCP_MAX_SGL_DESCRIPTORS];
	uint32_t					data_iovcnt;
	uint32_t					data_len;

	uint32_t					readv_offset;
	TAILQ_ENTRY(nvme_tcp_pdu)			tailq;
	uint32_t					remaining;
	uint32_t					padding_len;
	struct _nvme_tcp_sgl				sgl;

	struct spdk_dif_ctx				*dif_ctx;

	void						*req; /* data tied to a tcp request */
	void						*qpair;
};
SPDK_STATIC_ASSERT(offsetof(struct nvme_tcp_pdu,
			    sock_req) + sizeof(struct spdk_sock_request) == offsetof(struct nvme_tcp_pdu, iov),
		   "Compiler inserted padding between iov and sock_req");

enum nvme_tcp_pdu_recv_state {
	/* Ready to wait for a PDU */
	NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY,

	/* Active tqpair waiting for the PDU common header */
	NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH,

	/* Active tqpair waiting for the PDU-specific header */
	NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH,

	/* Active tqpair waiting for a tcp request, only used on the target side */
	NVME_TCP_PDU_RECV_STATE_AWAIT_REQ,

	/* Active tqpair waiting for the payload */
	NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD,

	/* Active tqpair has hit an error and does not wait for payload */
	NVME_TCP_PDU_RECV_STATE_ERROR,
};

enum nvme_tcp_error_codes {
	NVME_TCP_PDU_IN_PROGRESS        = 0,
	NVME_TCP_CONNECTION_FATAL       = -1,
	NVME_TCP_PDU_FATAL              = -2,
};

enum nvme_tcp_qpair_state {
	NVME_TCP_QPAIR_STATE_INVALID = 0,
	NVME_TCP_QPAIR_STATE_INITIALIZING = 1,
	NVME_TCP_QPAIR_STATE_RUNNING = 2,
	NVME_TCP_QPAIR_STATE_EXITING = 3,
	NVME_TCP_QPAIR_STATE_EXITED = 4,
};

static const bool g_nvme_tcp_hdgst[] = {
	[SPDK_NVME_TCP_PDU_TYPE_IC_REQ]         = false,
	[SPDK_NVME_TCP_PDU_TYPE_IC_RESP]        = false,
	[SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ]   = false,
	[SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ]   = false,
	[SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD]    = true,
	[SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP]   = true,
	[SPDK_NVME_TCP_PDU_TYPE_H2C_DATA]       = true,
	[SPDK_NVME_TCP_PDU_TYPE_C2H_DATA]       = true,
	[SPDK_NVME_TCP_PDU_TYPE_R2T]            = true
};

static const bool g_nvme_tcp_ddgst[] = {
	[SPDK_NVME_TCP_PDU_TYPE_IC_REQ]         = false,
	[SPDK_NVME_TCP_PDU_TYPE_IC_RESP]        = false,
	[SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ]   = false,
	[SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ]   = false,
	[SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD]    = true,
	[SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP]   = false,
	[SPDK_NVME_TCP_PDU_TYPE_H2C_DATA]       = true,
	[SPDK_NVME_TCP_PDU_TYPE_C2H_DATA]       = true,
	[SPDK_NVME_TCP_PDU_TYPE_R2T]            = false
};

static uint32_t
nvme_tcp_pdu_calc_header_digest(struct nvme_tcp_pdu *pdu)
{
	uint32_t crc32c;
	uint32_t hlen = pdu->hdr.common.hlen;

	crc32c = spdk_crc32c_update(&pdu->hdr.raw, hlen, ~0);
	crc32c = crc32c ^ SPDK_CRC32C_XOR;
	return crc32c;
}
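
/*
 * Illustrative sketch, not part of the original header: a receiver could
 * validate the HDGST field that trails the PDU header like this. The digest
 * bytes sit at offset hdr.common.hlen in the raw header buffer, matching the
 * layout produced by nvme_tcp_build_iovs() below. The helper name is
 * hypothetical.
 */
static inline bool
_nvme_tcp_example_hdgst_valid(struct nvme_tcp_pdu *pdu)
{
	uint32_t crc32c = nvme_tcp_pdu_calc_header_digest(pdu);

	/* Compare the computed CRC against the 4 digest bytes on the wire */
	return MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
}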

static uint32_t
_update_crc32c_iov(struct iovec *iov, int iovcnt, uint32_t crc32c)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		assert(iov[i].iov_base != NULL);
		assert(iov[i].iov_len != 0);
		crc32c = spdk_crc32c_update(iov[i].iov_base, iov[i].iov_len, crc32c);
	}

	return crc32c;
}

static uint32_t
nvme_tcp_pdu_calc_data_digest(struct nvme_tcp_pdu *pdu)
{
	uint32_t crc32c = SPDK_CRC32C_XOR;
	uint32_t mod;

	assert(pdu->data_len != 0);

	if (spdk_likely(!pdu->dif_ctx)) {
		crc32c = _update_crc32c_iov(pdu->data_iov, pdu->data_iovcnt, crc32c);
	} else {
		spdk_dif_update_crc32c_stream(pdu->data_iov, pdu->data_iovcnt,
					      0, pdu->data_len, &crc32c, pdu->dif_ctx);
	}

	mod = pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT;
	if (mod != 0) {
		uint32_t pad_length = SPDK_NVME_TCP_DIGEST_ALIGNMENT - mod;
		uint8_t pad[3] = {0, 0, 0};

		assert(pad_length > 0);
		assert(pad_length <= sizeof(pad));
		crc32c = spdk_crc32c_update(pad, pad_length, crc32c);
	}
	crc32c = crc32c ^ SPDK_CRC32C_XOR;
	return crc32c;
}
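
/*
 * Illustrative sketch, not part of the original header: a sender could seal
 * the payload digest like this before queueing the PDU, so that the DDGST
 * bytes in pdu->data_digest can be appended as the final field by
 * nvme_tcp_build_iovs() below. The helper name is hypothetical.
 */
static inline void
_nvme_tcp_example_seal_ddgst(struct nvme_tcp_pdu *pdu)
{
	uint32_t crc32c = nvme_tcp_pdu_calc_data_digest(pdu);

	/* Store the CRC in little-endian order in the PDU's digest buffer */
	MAKE_DIGEST_WORD(pdu->data_digest, crc32c);
}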

static inline void
_nvme_tcp_sgl_init(struct _nvme_tcp_sgl *s, struct iovec *iov, int iovcnt,
		   uint32_t iov_offset)
{
	s->iov = iov;
	s->iovcnt = iovcnt;
	s->iov_offset = iov_offset;
	s->total_size = 0;
}

static inline void
_nvme_tcp_sgl_advance(struct _nvme_tcp_sgl *s, uint32_t step)
{
	s->iov_offset += step;
	while (s->iovcnt > 0) {
		if (s->iov_offset < s->iov->iov_len) {
			break;
		}

		s->iov_offset -= s->iov->iov_len;
		s->iov++;
		s->iovcnt--;
	}
}

static inline void
_nvme_tcp_sgl_get_buf(struct _nvme_tcp_sgl *s, void **_buf, uint32_t *_buf_len)
{
	if (_buf != NULL) {
		*_buf = s->iov->iov_base + s->iov_offset;
	}
	if (_buf_len != NULL) {
		*_buf_len = s->iov->iov_len - s->iov_offset;
	}
}

static inline bool
_nvme_tcp_sgl_append(struct _nvme_tcp_sgl *s, uint8_t *data, uint32_t data_len)
{
	if (s->iov_offset >= data_len) {
		s->iov_offset -= data_len;
	} else {
		assert(s->iovcnt > 0);
		s->iov->iov_base = data + s->iov_offset;
		s->iov->iov_len = data_len - s->iov_offset;
		s->total_size += data_len - s->iov_offset;
		s->iov_offset = 0;
		s->iov++;
		s->iovcnt--;
		if (s->iovcnt == 0) {
			return false;
		}
	}

	return true;
}
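
/*
 * Illustrative sketch, not part of the original header: filling an iovec
 * array from two flat buffers with the _nvme_tcp_sgl helpers. The buffer
 * names and the helper name are hypothetical; nvme_tcp_build_iovs() below
 * applies the same pattern to real PDUs.
 */
static inline int
_nvme_tcp_example_fill_iovs(struct iovec *iov, int iovcnt,
			    uint8_t *hdr, uint32_t hdr_len,
			    uint8_t *payload, uint32_t payload_len)
{
	struct _nvme_tcp_sgl sgl;

	_nvme_tcp_sgl_init(&sgl, iov, iovcnt, 0);
	if (_nvme_tcp_sgl_append(&sgl, hdr, hdr_len)) {
		_nvme_tcp_sgl_append(&sgl, payload, payload_len);
	}

	/* Number of iovec entries consumed */
	return iovcnt - sgl.iovcnt;
}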

static inline bool
_nvme_tcp_sgl_append_multi(struct _nvme_tcp_sgl *s, struct iovec *iov, int iovcnt)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		if (!_nvme_tcp_sgl_append(s, iov[i].iov_base, iov[i].iov_len)) {
			return false;
		}
	}

	return true;
}

static inline uint32_t
_get_iov_array_size(struct iovec *iov, int iovcnt)
{
	int i;
	uint32_t size = 0;

	for (i = 0; i < iovcnt; i++) {
		size += iov[i].iov_len;
	}

	return size;
}

static inline bool
_nvme_tcp_sgl_append_multi_with_md(struct _nvme_tcp_sgl *s, struct iovec *iov, int iovcnt,
				   uint32_t data_len, const struct spdk_dif_ctx *dif_ctx)
{
	int rc;
	uint32_t mapped_len = 0;

	if (s->iov_offset >= data_len) {
		s->iov_offset -= _get_iov_array_size(iov, iovcnt);
	} else {
		rc = spdk_dif_set_md_interleave_iovs(s->iov, s->iovcnt, iov, iovcnt,
						     s->iov_offset, data_len - s->iov_offset,
						     &mapped_len, dif_ctx);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to setup iovs for DIF insert/strip.\n");
			return false;
		}

		s->total_size += mapped_len;
		s->iov_offset = 0;
		assert(s->iovcnt >= rc);
		s->iovcnt -= rc;
		s->iov += rc;

		if (s->iovcnt == 0) {
			return false;
		}
	}

	return true;
}

static int
nvme_tcp_build_iovs(struct iovec *iov, int iovcnt, struct nvme_tcp_pdu *pdu,
		    bool hdgst_enable, bool ddgst_enable, uint32_t *_mapped_length)
{
	uint32_t hlen, plen;
	struct _nvme_tcp_sgl *sgl;

	if (iovcnt == 0) {
		return 0;
	}

	sgl = &pdu->sgl;
	_nvme_tcp_sgl_init(sgl, iov, iovcnt, 0);
	hlen = pdu->hdr.common.hlen;

	/* Header Digest */
	if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && hdgst_enable) {
		hlen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	plen = hlen;
	if (!pdu->data_len) {
		/* PDU header + possible header digest */
		_nvme_tcp_sgl_append(sgl, (uint8_t *)&pdu->hdr.raw, hlen);
		goto end;
	}

	/* Padding */
	if (pdu->padding_len > 0) {
		hlen += pdu->padding_len;
		plen = hlen;
	}

	if (!_nvme_tcp_sgl_append(sgl, (uint8_t *)&pdu->hdr.raw, hlen)) {
		goto end;
	}

	/* Data Segment */
	plen += pdu->data_len;
	if (spdk_likely(!pdu->dif_ctx)) {
		if (!_nvme_tcp_sgl_append_multi(sgl, pdu->data_iov, pdu->data_iovcnt)) {
			goto end;
		}
	} else {
		if (!_nvme_tcp_sgl_append_multi_with_md(sgl, pdu->data_iov, pdu->data_iovcnt,
							pdu->data_len, pdu->dif_ctx)) {
			goto end;
		}
	}

	/* Data Digest */
	if (g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] && ddgst_enable) {
		plen += SPDK_NVME_TCP_DIGEST_LEN;
		_nvme_tcp_sgl_append(sgl, pdu->data_digest, SPDK_NVME_TCP_DIGEST_LEN);
	}

	assert(plen == pdu->hdr.common.plen);

end:
	if (_mapped_length != NULL) {
		*_mapped_length = sgl->total_size;
	}

	return iovcnt - sgl->iovcnt;
}
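
/*
 * Illustrative sketch, not part of the original header: flattening a PDU into
 * iovecs and writing it synchronously. The helper name is hypothetical and
 * this is not the transport's actual send path (which queues pdu->sock_req
 * through the asynchronous socket API); it only shows the intended call
 * pattern of nvme_tcp_build_iovs().
 */
static inline ssize_t
_nvme_tcp_example_write_pdu(struct spdk_sock *sock, struct nvme_tcp_pdu *pdu,
			    bool hdgst_enable, bool ddgst_enable)
{
	struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS * 2];
	uint32_t mapped_length = 0;
	int iovcnt;

	iovcnt = nvme_tcp_build_iovs(iovs, NVME_TCP_MAX_SGL_DESCRIPTORS * 2, pdu,
				     hdgst_enable, ddgst_enable, &mapped_length);
	if (iovcnt < 1) {
		return -1;
	}

	return spdk_sock_writev(sock, iovs, iovcnt);
}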

static int
nvme_tcp_build_payload_iovs(struct iovec *iov, int iovcnt, struct nvme_tcp_pdu *pdu,
			    bool ddgst_enable, uint32_t *_mapped_length)
{
	struct _nvme_tcp_sgl *sgl;

	if (iovcnt == 0) {
		return 0;
	}

	sgl = &pdu->sgl;
	_nvme_tcp_sgl_init(sgl, iov, iovcnt, pdu->readv_offset);

	if (spdk_likely(!pdu->dif_ctx)) {
		if (!_nvme_tcp_sgl_append_multi(sgl, pdu->data_iov, pdu->data_iovcnt)) {
			goto end;
		}
	} else {
		if (!_nvme_tcp_sgl_append_multi_with_md(sgl, pdu->data_iov, pdu->data_iovcnt,
							pdu->data_len, pdu->dif_ctx)) {
			goto end;
		}
	}

	/* Data Digest */
	if (ddgst_enable) {
		_nvme_tcp_sgl_append(sgl, pdu->data_digest, SPDK_NVME_TCP_DIGEST_LEN);
	}

end:
	if (_mapped_length != NULL) {
		*_mapped_length = sgl->total_size;
	}
	return iovcnt - sgl->iovcnt;
}

static int
nvme_tcp_read_data(struct spdk_sock *sock, int bytes,
		   void *buf)
{
	int ret;

	ret = spdk_sock_recv(sock, buf, bytes);

	if (ret > 0) {
		return ret;
	}

	if (ret < 0) {
		if (errno == EAGAIN || errno == EWOULDBLOCK) {
			return 0;
		}

		/* Do not log an error for a connection reset */
		if (errno != ECONNRESET) {
			SPDK_ERRLOG("spdk_sock_recv() failed, errno %d: %s\n",
				    errno, spdk_strerror(errno));
		}
	}

	/* connection closed */
	return NVME_TCP_CONNECTION_FATAL;
}
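
/*
 * Illustrative sketch, not part of the original header: reading the fixed-size
 * common header of an incoming PDU. ch_valid_bytes tracks partial reads across
 * poller iterations, mirroring the AWAIT_PDU_CH state above. The helper name
 * is hypothetical.
 */
static inline int
_nvme_tcp_example_read_ch(struct spdk_sock *sock, struct nvme_tcp_pdu *pdu)
{
	int rc;
	uint32_t remaining = sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes;

	rc = nvme_tcp_read_data(sock, remaining,
				(uint8_t *)&pdu->hdr.raw + pdu->ch_valid_bytes);
	if (rc > 0) {
		/* Accumulate until the full common header has arrived */
		pdu->ch_valid_bytes += rc;
	}

	return rc;
}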

static int
nvme_tcp_readv_data(struct spdk_sock *sock, struct iovec *iov, int iovcnt)
{
	int ret;

	assert(sock != NULL);
	if (iov == NULL || iovcnt == 0) {
		return 0;
	}

	if (iovcnt == 1) {
		return nvme_tcp_read_data(sock, iov->iov_len, iov->iov_base);
	}

	ret = spdk_sock_readv(sock, iov, iovcnt);

	if (ret > 0) {
		return ret;
	}

	if (ret < 0) {
		if (errno == EAGAIN || errno == EWOULDBLOCK) {
			return 0;
		}

		/* Do not log an error for a connection reset */
		if (errno != ECONNRESET) {
			SPDK_ERRLOG("spdk_sock_readv() failed, errno %d: %s\n",
				    errno, spdk_strerror(errno));
		}
	}

	/* connection closed */
	return NVME_TCP_CONNECTION_FATAL;
}


static int
nvme_tcp_read_payload_data(struct spdk_sock *sock, struct nvme_tcp_pdu *pdu)
{
	struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS + 1];
	int iovcnt;

	iovcnt = nvme_tcp_build_payload_iovs(iov, NVME_TCP_MAX_SGL_DESCRIPTORS + 1, pdu,
					     pdu->ddgst_enable, NULL);
	assert(iovcnt >= 0);

	return nvme_tcp_readv_data(sock, iov, iovcnt);
}

static void
_nvme_tcp_pdu_set_data(struct nvme_tcp_pdu *pdu, void *data, uint32_t data_len)
{
	pdu->data_iov[0].iov_base = data;
	pdu->data_iov[0].iov_len = data_len;
	pdu->data_iovcnt = 1;
}

static void
nvme_tcp_pdu_set_data(struct nvme_tcp_pdu *pdu, void *data, uint32_t data_len)
{
	_nvme_tcp_pdu_set_data(pdu, data, data_len);
	pdu->data_len = data_len;
}

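/*
 * Illustrative sketch, not part of the original header: on the receive side, a
 * single contiguous buffer can be attached to an incoming PDU and then filled
 * from the socket. readv_offset records how much of the payload has already
 * been received, so a later call with the same buffer resumes where this one
 * stopped. The helper name and buffer parameters are hypothetical.
 */
static inline int
_nvme_tcp_example_recv_payload(struct spdk_sock *sock, struct nvme_tcp_pdu *pdu,
			       void *buf, uint32_t buf_len)
{
	int rc;

	nvme_tcp_pdu_set_data(pdu, buf, buf_len);

	rc = nvme_tcp_read_payload_data(sock, pdu);
	if (rc > 0) {
		pdu->readv_offset += rc;
	}

	return rc;
}
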
static void
nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
			  struct iovec *iov, int iovcnt,
			  uint32_t data_offset, uint32_t data_len)
{
	uint32_t buf_offset, buf_len, remain_len, len;
	uint8_t *buf;
	struct _nvme_tcp_sgl *pdu_sgl, buf_sgl;

	pdu->data_len = data_len;

	if (spdk_likely(!pdu->dif_ctx)) {
		buf_offset = data_offset;
		buf_len = data_len;
	} else {
		spdk_dif_ctx_set_data_offset(pdu->dif_ctx, data_offset);
		spdk_dif_get_range_with_md(data_offset, data_len,
					   &buf_offset, &buf_len, pdu->dif_ctx);
	}

	if (iovcnt == 1) {
		_nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)iov[0].iov_base + buf_offset), buf_len);
	} else {
		pdu_sgl = &pdu->sgl;

		_nvme_tcp_sgl_init(pdu_sgl, pdu->data_iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0);
		_nvme_tcp_sgl_init(&buf_sgl, iov, iovcnt, 0);

		_nvme_tcp_sgl_advance(&buf_sgl, buf_offset);
		remain_len = buf_len;

		while (remain_len > 0) {
			_nvme_tcp_sgl_get_buf(&buf_sgl, (void *)&buf, &len);
			len = spdk_min(len, remain_len);

			_nvme_tcp_sgl_advance(&buf_sgl, len);
			remain_len -= len;

			if (!_nvme_tcp_sgl_append(pdu_sgl, buf, len)) {
				break;
			}
		}

		assert(remain_len == 0);
		assert(pdu_sgl->total_size == buf_len);

		pdu->data_iovcnt = NVME_TCP_MAX_SGL_DESCRIPTORS - pdu_sgl->iovcnt;
	}
}

static void
nvme_tcp_pdu_calc_psh_len(struct nvme_tcp_pdu *pdu, bool hdgst_enable)
{
	uint8_t psh_len, pdo, padding_len;

	psh_len = pdu->hdr.common.hlen;

	if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && hdgst_enable) {
		pdu->has_hdgst = true;
		psh_len += SPDK_NVME_TCP_DIGEST_LEN;
		if (pdu->hdr.common.plen > psh_len) {
			pdo = pdu->hdr.common.pdo;
			padding_len = pdo - psh_len;
			if (padding_len > 0) {
				psh_len = pdo;
			}
		}
	}

	psh_len -= sizeof(struct spdk_nvme_tcp_common_pdu_hdr);
	pdu->psh_len = psh_len;
}

#endif /* SPDK_INTERNAL_NVME_TCP_H */