/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NVMe/TCP transport
 */

#include "nvme_internal.h"

#include "spdk/endian.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "spdk/stdinc.h"
#include "spdk/crc32.h"
#include "spdk/assert.h"
#include "spdk/thread.h"
#include "spdk/trace.h"
#include "spdk/util.h"

#include "spdk_internal/nvme_tcp.h"

#define NVME_TCP_RW_BUFFER_SIZE 131072

/*
 * Maximum number of SGL elements.
 * This is chosen to match the current nvme_pcie.c limit.
 */
#define NVME_TCP_MAX_SGL_DESCRIPTORS	(253)

#define NVME_TCP_HPDA_DEFAULT			0
#define NVME_TCP_MAX_R2T_DEFAULT		16
#define NVME_TCP_PDU_H2C_MIN_DATA_SIZE		4096
#define NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE	8192

/* NVMe TCP transport extensions for spdk_nvme_ctrlr */
struct nvme_tcp_ctrlr {
	struct spdk_nvme_ctrlr			ctrlr;
};

/* NVMe TCP qpair extensions for spdk_nvme_qpair */
struct nvme_tcp_qpair {
	struct spdk_nvme_qpair			qpair;
	struct spdk_sock			*sock;

	TAILQ_HEAD(, nvme_tcp_req)		free_reqs;
	TAILQ_HEAD(, nvme_tcp_req)		outstanding_reqs;
	TAILQ_HEAD(, nvme_tcp_req)		active_r2t_reqs;

	TAILQ_HEAD(, nvme_tcp_pdu)		send_queue;
	struct nvme_tcp_pdu			recv_pdu;
	struct nvme_tcp_pdu			send_pdu; /* only for error pdu and init pdu */
	enum nvme_tcp_pdu_recv_state		recv_state;

	struct nvme_tcp_req			*tcp_reqs;

	uint16_t				num_entries;

	bool					host_hdgst_enable;
	bool					host_ddgst_enable;

	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
	uint32_t				maxh2cdata;

	int32_t					max_r2t;
	int32_t					pending_r2t;

	/* 0 based value, which is used to guide the padding */
	uint8_t					cpda;

	enum nvme_tcp_qpair_state		state;
};

enum nvme_tcp_req_state {
	NVME_TCP_REQ_FREE,
	NVME_TCP_REQ_ACTIVE,
	NVME_TCP_REQ_ACTIVE_R2T,
};

struct nvme_tcp_req {
	struct nvme_request			*req;
	enum nvme_tcp_req_state			state;
	uint16_t				cid;
	uint16_t				ttag;
	uint32_t				datao;
	uint32_t				r2tl_remain;
	bool					in_capsule_data;
	struct nvme_tcp_pdu			send_pdu;
	TAILQ_ENTRY(nvme_tcp_req)		link;
	TAILQ_ENTRY(nvme_tcp_req)		active_r2t_link;
};

static void spdk_nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req);

static inline struct nvme_tcp_qpair *
nvme_tcp_qpair(struct spdk_nvme_qpair *qpair)
{
	assert(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
	return SPDK_CONTAINEROF(qpair, struct nvme_tcp_qpair, qpair);
}

static inline struct nvme_tcp_ctrlr *
nvme_tcp_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	assert(ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_TCP);
	return SPDK_CONTAINEROF(ctrlr, struct nvme_tcp_ctrlr, ctrlr);
}

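/*
 * Pop a free request from the qpair's free list, reset its per-command
 * state, and move it to the outstanding list. Returns NULL when all
 * num_entries requests are already in flight.
 */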
static struct nvme_tcp_req *
nvme_tcp_req_get(struct nvme_tcp_qpair *tqpair)
{
	struct nvme_tcp_req *tcp_req;

	tcp_req = TAILQ_FIRST(&tqpair->free_reqs);
	if (!tcp_req) {
		return NULL;
	}

	assert(tcp_req->state == NVME_TCP_REQ_FREE);
	tcp_req->state = NVME_TCP_REQ_ACTIVE;
	TAILQ_REMOVE(&tqpair->free_reqs, tcp_req, link);
	tcp_req->datao = 0;
	tcp_req->req = NULL;
	tcp_req->in_capsule_data = false;
	tcp_req->r2tl_remain = 0;
	memset(&tcp_req->send_pdu, 0, sizeof(tcp_req->send_pdu));
	TAILQ_INSERT_TAIL(&tqpair->outstanding_reqs, tcp_req, link);

	return tcp_req;
}

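/* Return a completed request to the free list so it can be reused. */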
static void
nvme_tcp_req_put(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
{
	assert(tcp_req->state != NVME_TCP_REQ_FREE);
	tcp_req->state = NVME_TCP_REQ_FREE;
	TAILQ_REMOVE(&tqpair->outstanding_reqs, tcp_req, link);
	TAILQ_INSERT_TAIL(&tqpair->free_reqs, tcp_req, link);
}

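/*
 * Resolve addr/service into a sockaddr using getaddrinfo() and copy the
 * first result into *sa. Returns 0 on success, a non-zero getaddrinfo()
 * error code or EINVAL on failure.
 */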
static int
nvme_tcp_parse_addr(struct sockaddr_storage *sa, int family, const char *addr, const char *service)
{
	struct addrinfo *res;
	struct addrinfo hints;
	int ret;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = family;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_protocol = 0;

	ret = getaddrinfo(addr, service, &hints, &res);
	if (ret) {
		SPDK_ERRLOG("getaddrinfo failed: %s (%d)\n", gai_strerror(ret), ret);
		return ret;
	}

	if (res->ai_addrlen > sizeof(*sa)) {
		SPDK_ERRLOG("getaddrinfo() ai_addrlen %zu too large\n", (size_t)res->ai_addrlen);
		ret = EINVAL;
	} else {
		memcpy(sa, res->ai_addr, res->ai_addrlen);
	}

	freeaddrinfo(res);
	return ret;
}

static void
nvme_tcp_free_reqs(struct nvme_tcp_qpair *tqpair)
{
	free(tqpair->tcp_reqs);
	tqpair->tcp_reqs = NULL;
}

static int
nvme_tcp_alloc_reqs(struct nvme_tcp_qpair *tqpair)
{
	int i;
	struct nvme_tcp_req	*tcp_req;

	tqpair->tcp_reqs = calloc(tqpair->num_entries, sizeof(struct nvme_tcp_req));
	if (tqpair->tcp_reqs == NULL) {
		SPDK_ERRLOG("Failed to allocate tcp_reqs\n");
		goto fail;
	}

	TAILQ_INIT(&tqpair->send_queue);
	TAILQ_INIT(&tqpair->free_reqs);
	TAILQ_INIT(&tqpair->outstanding_reqs);
	TAILQ_INIT(&tqpair->active_r2t_reqs);
	for (i = 0; i < tqpair->num_entries; i++) {
		tcp_req = &tqpair->tcp_reqs[i];
		tcp_req->cid = i;
		TAILQ_INSERT_TAIL(&tqpair->free_reqs, tcp_req, link);
	}

	return 0;
fail:
	nvme_tcp_free_reqs(tqpair);
	return -ENOMEM;
}

static int
nvme_tcp_qpair_destroy(struct spdk_nvme_qpair *qpair)
{
	struct nvme_tcp_qpair *tqpair;

	if (!qpair) {
		return -1;
	}

	nvme_qpair_deinit(qpair);

	tqpair = nvme_tcp_qpair(qpair);

	nvme_tcp_free_reqs(tqpair);

	spdk_sock_close(&tqpair->sock);
	free(tqpair);

	return 0;
}

int
nvme_tcp_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

/* This function must only be called while holding g_spdk_nvme_driver->lock */
int
nvme_tcp_ctrlr_scan(const struct spdk_nvme_transport_id *trid,
		    void *cb_ctx,
		    spdk_nvme_probe_cb probe_cb,
		    spdk_nvme_remove_cb remove_cb,
		    bool direct_connect)
{
	struct spdk_nvme_ctrlr_opts discovery_opts;
	struct spdk_nvme_ctrlr *discovery_ctrlr;
	union spdk_nvme_cc_register cc;
	int rc;
	struct nvme_completion_poll_status status;

	if (strcmp(trid->subnqn, SPDK_NVMF_DISCOVERY_NQN) != 0) {
		/* Not a discovery controller - connect directly. */
		rc = nvme_ctrlr_probe(trid, NULL, probe_cb, cb_ctx);
		return rc;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&discovery_opts, sizeof(discovery_opts));
	/* For the discovery_ctrlr, set the keep alive timeout to 0 (disabled). */
	discovery_opts.keep_alive_timeout_ms = 0;

	discovery_ctrlr = nvme_tcp_ctrlr_construct(trid, &discovery_opts, NULL);
	if (discovery_ctrlr == NULL) {
		return -1;
	}

	/* TODO: this should be using the normal NVMe controller initialization process */
	cc.raw = 0;
	cc.bits.en = 1;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
	rc = nvme_transport_ctrlr_set_reg_4(discovery_ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
					    cc.raw);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to set cc\n");
		nvme_ctrlr_destruct(discovery_ctrlr);
		return -1;
	}

	/* get the cdata info */
	status.done = false;
	rc = nvme_ctrlr_cmd_identify(discovery_ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
				     &discovery_ctrlr->cdata, sizeof(discovery_ctrlr->cdata),
				     nvme_completion_poll_cb, &status);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to identify cdata\n");
		nvme_ctrlr_destruct(discovery_ctrlr);
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(discovery_ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
		SPDK_ERRLOG("nvme_identify_controller failed!\n");
		nvme_ctrlr_destruct(discovery_ctrlr);
		return -ENXIO;
	}

	/* Direct attach through spdk_nvme_connect() API */
	if (direct_connect == true) {
		/* Set the ready state to skip the normal init process */
		discovery_ctrlr->state = NVME_CTRLR_STATE_READY;
		nvme_ctrlr_connected(discovery_ctrlr);
		nvme_ctrlr_add_process(discovery_ctrlr, 0);
		return 0;
	}

	rc = nvme_fabric_ctrlr_discover(discovery_ctrlr, cb_ctx, probe_cb);
	nvme_ctrlr_destruct(discovery_ctrlr);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "leave\n");
	return rc;
}

int
nvme_tcp_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_tcp_ctrlr *tctrlr = nvme_tcp_ctrlr(ctrlr);

	if (ctrlr->adminq) {
		nvme_tcp_qpair_destroy(ctrlr->adminq);
	}

	nvme_ctrlr_destruct_finish(ctrlr);

	free(tctrlr);

	return 0;
}

int
nvme_tcp_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	return nvme_fabric_ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_tcp_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	return nvme_fabric_ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_tcp_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	return nvme_fabric_ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_tcp_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	return nvme_fabric_ctrlr_get_reg_8(ctrlr, offset, value);
}

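/*
 * Flush the qpair's send_queue: gather iovecs from the PDUs at the head of
 * the queue (resuming from any partial prior write), hand them to
 * spdk_sock_writev(), complete fully-written PDUs, and record the offset
 * of a partially-written one. Returns 0 when the queue is drained, 1 when
 * data remains (or the socket would block), and -1 on a socket error.
 */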
static int
nvme_tcp_qpair_process_send_queue(struct nvme_tcp_qpair *tqpair)
{
	const int array_size = 32;
	struct iovec	iovec_array[array_size];
	struct iovec	*iov = iovec_array;
	int iovec_cnt = 0;
	int bytes = 0;
	uint32_t writev_offset;
	struct nvme_tcp_pdu *pdu;
	int pdu_length;
	TAILQ_HEAD(, nvme_tcp_pdu) completed_pdus_list;

	pdu = TAILQ_FIRST(&tqpair->send_queue);

	if (pdu == NULL) {
		return 0;
	}

	/*
	 * Build up a list of iovecs for the first few PDUs in the
	 *  tqpair's send_queue.
	 */
	while (pdu != NULL && ((array_size - iovec_cnt) >= 3)) {
		iovec_cnt += nvme_tcp_build_iovecs(&iovec_array[iovec_cnt],
						   pdu, tqpair->host_hdgst_enable,
						   tqpair->host_ddgst_enable);
		pdu = TAILQ_NEXT(pdu, tailq);
	}

	/*
	 * Check if the first PDU was partially written out the last time
	 *  this function was called, and if so adjust the iovec array
	 *  accordingly.
	 */
	writev_offset = TAILQ_FIRST(&tqpair->send_queue)->writev_offset;
	while ((writev_offset > 0) && (iovec_cnt > 0)) {
		if (writev_offset >= iov->iov_len) {
			writev_offset -= iov->iov_len;
			iov++;
			iovec_cnt--;
		} else {
			iov->iov_len -= writev_offset;
			iov->iov_base = (char *)iov->iov_base + writev_offset;
			writev_offset = 0;
		}
	}

	bytes = spdk_sock_writev(tqpair->sock, iov, iovec_cnt);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "wrote %d bytes\n", bytes);
	if (bytes == -1) {
		if (errno == EWOULDBLOCK || errno == EAGAIN) {
			return 1;
		} else {
			SPDK_ERRLOG("spdk_sock_writev() failed, errno %d: %s\n",
				    errno, spdk_strerror(errno));
			return -1;
		}
	}

	pdu = TAILQ_FIRST(&tqpair->send_queue);

	/*
	 * Free any PDUs that were fully written.  If a PDU was only
	 *  partially written, update its writev_offset so that next
	 *  time only the unwritten portion will be sent to writev().
	 */
	TAILQ_INIT(&completed_pdus_list);
	while (bytes > 0) {
		pdu_length = pdu->hdr.common.plen - pdu->writev_offset;
		assert(pdu_length > 0);
		if (bytes >= pdu_length) {
			bytes -= pdu_length;
			TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
			TAILQ_INSERT_TAIL(&completed_pdus_list, pdu, tailq);
			pdu = TAILQ_FIRST(&tqpair->send_queue);
		} else {
			pdu->writev_offset += bytes;
			bytes = 0;
		}
	}

	while (!TAILQ_EMPTY(&completed_pdus_list)) {
		pdu = TAILQ_FIRST(&completed_pdus_list);
		TAILQ_REMOVE(&completed_pdus_list, pdu, tailq);
		assert(pdu->cb_fn != NULL);
		pdu->cb_fn(pdu->cb_arg);
	}

	return TAILQ_EMPTY(&tqpair->send_queue) ? 0 : 1;
}

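/*
 * Compute header/data digests when enabled (IC req and H2C term req PDUs
 * are always sent without digests), then queue the PDU for transmission;
 * cb_fn runs once the PDU has been fully written to the socket.
 */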
static int
nvme_tcp_qpair_write_pdu(struct nvme_tcp_qpair *tqpair,
			 struct nvme_tcp_pdu *pdu,
			 nvme_tcp_qpair_xfer_complete_cb cb_fn,
			 void *cb_arg)
{
	int enable_digest;
	int hlen;
	uint32_t crc32c;

	hlen = pdu->hdr.common.hlen;
	enable_digest = 1;
	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ ||
	    pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) {
		/* this PDU should be sent without digest */
		enable_digest = 0;
	}

	/* Header Digest */
	if (enable_digest && tqpair->host_hdgst_enable) {
		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
		MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + hlen, crc32c);
	}

	/* Data Digest */
	if (pdu->data_len > 0 && enable_digest && tqpair->host_ddgst_enable) {
		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
		MAKE_DIGEST_WORD(pdu->data_digest, crc32c);
	}

	pdu->cb_fn = cb_fn;
	pdu->cb_arg = cb_arg;
	TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
	return 0;
}

/*
 * Build SGL describing contiguous payload buffer.
 */
static int
nvme_tcp_build_contig_request(struct nvme_tcp_qpair *tqpair, struct nvme_request *req)
{
	void *payload = req->payload.contig_or_cb_arg + req->payload_offset;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");

	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
	req->cmd.dptr.sgl1.address = (uint64_t)payload;

	return 0;
}

/*
 * Build SGL describing scattered payload buffer.
 */
static int
nvme_tcp_build_sgl_request(struct nvme_tcp_qpair *tqpair, struct nvme_request *req)
{
	int rc;
	void *virt_addr;
	uint32_t length;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");

	assert(req->payload_size != 0);
	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
	assert(req->payload.reset_sgl_fn != NULL);
	assert(req->payload.next_sge_fn != NULL);
	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);

	/* TODO: for now, we only support a single SGL entry */
	rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
	if (rc) {
		return -1;
	}

	if (length < req->payload_size) {
		SPDK_ERRLOG("multi-element SGLs are currently not supported for TCP\n");
		return -1;
	}

	req->cmd.dptr.sgl1.address = (uint64_t)virt_addr;

	return 0;
}

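/*
 * In-capsule data size in bytes. ioccsz is reported in 16-byte units and
 * includes the 64-byte SQE, so e.g. ioccsz == 260 allows
 * 260 * 16 - 64 = 4096 bytes of in-capsule data.
 */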
static inline uint32_t
nvme_tcp_icdsz_bytes(struct spdk_nvme_ctrlr *ctrlr)
{
	return (ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd));
}

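/*
 * Fill in the transport-specific parts of the NVMe command (cid, SGL) and
 * decide whether the payload will be sent as in-capsule data. Host-to-
 * controller payloads small enough to fit in ioccsz (capped at 8 KiB for
 * fabrics/admin commands) are carried in the command capsule itself.
 */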
static int
nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
		  struct nvme_tcp_req *tcp_req)
{
	struct spdk_nvme_ctrlr *ctrlr = tqpair->qpair.ctrlr;
	int rc = 0;
	enum spdk_nvme_data_transfer xfer;
	uint32_t max_incapsule_data_size;

	tcp_req->req = req;
	req->cmd.cid = tcp_req->cid;
	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	req->cmd.dptr.sgl1.unkeyed.length = req->payload_size;

	if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG) {
		rc = nvme_tcp_build_contig_request(tqpair, req);
	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL) {
		rc = nvme_tcp_build_sgl_request(tqpair, req);
	} else {
		rc = -1;
	}

	if (rc) {
		return rc;
	}

	if (req->cmd.opc == SPDK_NVME_OPC_FABRIC) {
		struct spdk_nvmf_capsule_cmd *nvmf_cmd = (struct spdk_nvmf_capsule_cmd *)&req->cmd;

		xfer = spdk_nvme_opc_get_data_transfer(nvmf_cmd->fctype);
	} else {
		xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
	}
	if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		max_incapsule_data_size = nvme_tcp_icdsz_bytes(ctrlr);
		if ((req->cmd.opc == SPDK_NVME_OPC_FABRIC) || nvme_qpair_is_admin_queue(&tqpair->qpair)) {
			max_incapsule_data_size = spdk_min(max_incapsule_data_size, NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE);
		}

		if (req->payload_size <= max_incapsule_data_size) {
			req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
			req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
			req->cmd.dptr.sgl1.address = 0;
			tcp_req->in_capsule_data = true;
		}
	}

	return 0;
}

static void
nvme_tcp_qpair_cmd_send_complete(void *cb_arg)
{
}

static void
nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
			  struct nvme_tcp_req *tcp_req)
{
	/* This is tricky: we should handle the different NVMe SGL command types
	 * (contiguous vs. scattered data). For now only contiguous data is
	 * handled, which is not exactly correct and should be fixed. */
	if (spdk_unlikely(!tcp_req->req->cmd.dptr.sgl1.address)) {
		pdu->data = (void *)tcp_req->req->payload.contig_or_cb_arg + tcp_req->datao;
	} else {
		pdu->data = (void *)tcp_req->req->cmd.dptr.sgl1.address + tcp_req->datao;
	}
}

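/*
 * Build a capsule command PDU for tcp_req: fill in the common header,
 * account for header/data digests, apply CPDA padding when in-capsule
 * data is present, and queue the PDU for sending.
 */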
static int
nvme_tcp_qpair_capsule_cmd_send(struct nvme_tcp_qpair *tqpair,
				struct nvme_tcp_req *tcp_req)
{
	struct nvme_tcp_pdu *pdu;
	struct spdk_nvme_tcp_cmd *capsule_cmd;
	uint32_t plen = 0, alignment;
	uint8_t pdo;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
	pdu = &tcp_req->send_pdu;

	capsule_cmd = &pdu->hdr.capsule_cmd;
	capsule_cmd->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	plen = capsule_cmd->common.hlen = sizeof(*capsule_cmd);
	capsule_cmd->ccsqe = tcp_req->req->cmd;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "capsule_cmd cid=%u on tqpair(%p)\n", tcp_req->req->cmd.cid, tqpair);

	if (tqpair->host_hdgst_enable) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Header digest is enabled for capsule command on tcp_req=%p\n",
			      tcp_req);
		capsule_cmd->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	if ((tcp_req->req->payload_size == 0) || !tcp_req->in_capsule_data) {
		goto end;
	}

	pdo = plen;
	pdu->padding_len = 0;
	if (tqpair->cpda) {
		alignment = (tqpair->cpda + 1) << 2;
		if (alignment > plen) {
			pdu->padding_len = alignment - plen;
			pdo = alignment;
			plen = alignment;
		}
	}

	capsule_cmd->common.pdo = pdo;
	plen += tcp_req->req->payload_size;
	if (tqpair->host_ddgst_enable) {
		capsule_cmd->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	tcp_req->datao = 0;
	nvme_tcp_pdu_set_data_buf(pdu, tcp_req);
	pdu->data_len = tcp_req->req->payload_size;

end:
	capsule_cmd->common.plen = plen;
	return nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_qpair_cmd_send_complete, NULL);
}

int
nvme_tcp_qpair_submit_request(struct spdk_nvme_qpair *qpair,
			      struct nvme_request *req)
{
	struct nvme_tcp_qpair *tqpair;
	struct nvme_tcp_req *tcp_req;

	tqpair = nvme_tcp_qpair(qpair);
	assert(tqpair != NULL);
	assert(req != NULL);

	tcp_req = nvme_tcp_req_get(tqpair);
	if (!tcp_req) {
		/*
		 * No tcp_req is available.  Queue the request to be processed later.
		 */
		STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		return 0;
	}

	if (nvme_tcp_req_init(tqpair, req, tcp_req)) {
		SPDK_ERRLOG("nvme_tcp_req_init() failed\n");
		nvme_tcp_req_put(tqpair, tcp_req);
		return -1;
	}

	req->timed_out = false;
	if (spdk_unlikely(tqpair->qpair.ctrlr->timeout_enabled)) {
		req->submit_tick = spdk_get_ticks();
	} else {
		req->submit_tick = 0;
	}

	return nvme_tcp_qpair_capsule_cmd_send(tqpair, tcp_req);
}

int
nvme_tcp_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	return nvme_tcp_qpair_destroy(qpair);
}

int
nvme_tcp_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	return -1;
}

int
nvme_tcp_qpair_enable(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_tcp_qpair_disable(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_tcp_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_tcp_qpair_fail(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

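/*
 * Transition the PDU receive state machine. Entering READY or ERROR also
 * clears recv_pdu so the next PDU starts from a clean slate.
 */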
static void
nvme_tcp_qpair_set_recv_state(struct nvme_tcp_qpair *tqpair,
			      enum nvme_tcp_pdu_recv_state state)
{
	if (tqpair->recv_state == state) {
		SPDK_ERRLOG("The recv state of tqpair=%p is already set to state(%d)\n",
			    tqpair, state);
		return;
	}

	tqpair->recv_state = state;
	switch (state) {
	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
	case NVME_TCP_PDU_RECV_STATE_ERROR:
		memset(&tqpair->recv_pdu, 0, sizeof(struct nvme_tcp_pdu));
		break;
	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
	default:
		break;
	}
}

static void
nvme_tcp_qpair_send_h2c_term_req_complete(void *cb_arg)
{
	struct nvme_tcp_qpair *tqpair = cb_arg;

	tqpair->state = NVME_TCP_QPAIR_STATE_EXITING;
}

static void
nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
				 enum spdk_nvme_tcp_term_req_fes fes, uint32_t error_offset)
{
	struct nvme_tcp_pdu *rsp_pdu;
	struct spdk_nvme_tcp_term_req_hdr *h2c_term_req;
	uint32_t h2c_term_req_hdr_len = sizeof(*h2c_term_req);
	uint8_t copy_len;

	rsp_pdu = &tqpair->send_pdu;
	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
	h2c_term_req = &rsp_pdu->hdr.term_req;
	h2c_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	h2c_term_req->common.hlen = h2c_term_req_hdr_len;

	if ((fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
	    (fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
		DSET32(&h2c_term_req->fei, error_offset);
	}

	rsp_pdu->data = (uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len;

	copy_len = pdu->hdr.common.hlen;
	if (copy_len > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE) {
		copy_len = SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE;
	}

	/* Copy the error info into the buffer */
	memcpy((uint8_t *)rsp_pdu->data, pdu->hdr.raw, copy_len);
	rsp_pdu->data_len = copy_len;

	/* The plen includes the header of the erroneous received PDU copied above */
	h2c_term_req->common.plen = h2c_term_req->common.hlen + copy_len;
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_send_h2c_term_req_complete, NULL);
}

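/*
 * Validate the common header of a newly received PDU: check that the PDU
 * type is legal in the current qpair state and that hlen/plen match what
 * that type requires. On success, advance to the PSH state; otherwise
 * send an H2C termination request.
 */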
static void
nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
{
	struct nvme_tcp_pdu *pdu;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;
	uint32_t expected_hlen, hd_len = 0;
	bool plen_error = false;

	pdu = &tqpair->recv_pdu;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "pdu type = %d\n", pdu->hdr.common.pdu_type);
	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP) {
		if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
			SPDK_ERRLOG("Already received IC_RESP PDU, and we should reject this pdu=%p\n", pdu);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
			goto err;
		}
		expected_hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
		if (pdu->hdr.common.plen != expected_hlen) {
			plen_error = true;
		}
	} else {
		if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
			SPDK_ERRLOG("The TCP/IP tqpair connection is not negotiated\n");
			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
			goto err;
		}

		switch (pdu->hdr.common.pdu_type) {
		case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
			expected_hlen = sizeof(struct spdk_nvme_tcp_rsp);
			if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
				hd_len = SPDK_NVME_TCP_DIGEST_LEN;
			}

			if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
				plen_error = true;
			}
			break;
		case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
			expected_hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
			if (pdu->hdr.common.plen < pdu->hdr.common.pdo) {
				plen_error = true;
			}
			break;
		case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
			expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
			if ((pdu->hdr.common.plen <= expected_hlen) ||
			    (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
				plen_error = true;
			}
			break;
		case SPDK_NVME_TCP_PDU_TYPE_R2T:
			expected_hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
			if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
				hd_len = SPDK_NVME_TCP_DIGEST_LEN;
			}

			if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
				plen_error = true;
			}
			break;

		default:
			SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr.common.pdu_type);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
			error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type);
			goto err;
		}
	}

	if (pdu->hdr.common.hlen != expected_hlen) {
		SPDK_ERRLOG("Expected PDU header length %u, got %u\n",
			    expected_hlen, pdu->hdr.common.hlen);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen);
		goto err;
	} else if (plen_error) {
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen);
		goto err;
	} else {
		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
		return;
	}
err:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static struct nvme_tcp_req *
get_nvme_active_req_by_cid(struct nvme_tcp_qpair *tqpair, uint32_t cid)
{
	assert(tqpair != NULL);
	if ((cid >= tqpair->num_entries) || (tqpair->tcp_reqs[cid].state == NVME_TCP_REQ_FREE)) {
		return NULL;
	}

	return &tqpair->tcp_reqs[cid];
}

static void
nvme_tcp_req_complete(struct nvme_request *req,
		      struct spdk_nvme_cpl *rsp)
{
	nvme_complete_request(req, rsp);
	nvme_free_request(req);
}

static void
nvme_tcp_free_and_handle_queued_req(struct spdk_nvme_qpair *qpair)
{
	struct nvme_request *req;

	if (!STAILQ_EMPTY(&qpair->queued_req) && !qpair->ctrlr->is_resetting) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		nvme_qpair_submit_request(qpair, req);
	}
}

static void
nvme_tcp_c2h_data_payload_handle(struct nvme_tcp_qpair *tqpair,
				 struct nvme_tcp_pdu *pdu, uint32_t *reaped)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
	struct spdk_nvme_cpl cpl = {};
	uint8_t flags;

	tcp_req = pdu->tcp_req;
	assert(tcp_req != NULL);

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
	c2h_data = &pdu->hdr.c2h_data;
	tcp_req->datao += pdu->data_len;
	flags = c2h_data->common.flags;

	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	if (flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) {
		if (tcp_req->datao == tcp_req->req->payload_size) {
			cpl.status.p = 0;
		} else {
			cpl.status.p = 1;
		}

		cpl.cid = tcp_req->cid;
		cpl.sqid = tqpair->qpair.id;
		nvme_tcp_req_complete(tcp_req->req, &cpl);
		nvme_tcp_req_put(tqpair, tcp_req);
		(*reaped)++;
		nvme_tcp_free_and_handle_queued_req(&tqpair->qpair);
	}
}

static const char *spdk_nvme_tcp_term_req_fes_str[] = {
	"Invalid PDU Header Field",
	"PDU Sequence Error",
	"Header Digest Error",
	"Data Transfer Out of Range",
	"Data Transfer Limit Exceeded",
	"Unsupported parameter",
};

static void
nvme_tcp_c2h_term_req_dump(struct spdk_nvme_tcp_term_req_hdr *c2h_term_req)
{
	SPDK_ERRLOG("Error info of pdu(%p): %s\n", c2h_term_req,
		    spdk_nvme_tcp_term_req_fes_str[c2h_term_req->fes]);
	if ((c2h_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
	    (c2h_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "The offset from the start of the PDU header is %u\n",
			      DGET32(c2h_term_req->fei));
	}
	/* we may also need to dump some other info here */
}

static void
nvme_tcp_c2h_term_req_payload_handle(struct nvme_tcp_qpair *tqpair,
				     struct nvme_tcp_pdu *pdu)
{
	nvme_tcp_c2h_term_req_dump(&pdu->hdr.term_req);
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
}

static void
nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
			    uint32_t *reaped)
{
	int rc = 0;
	struct nvme_tcp_pdu *pdu;
	uint32_t crc32c, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	pdu = &tqpair->recv_pdu;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");

	/* check data digest if needed */
	if (pdu->ddigest_valid_bytes) {
		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
		rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
		if (rc == 0) {
			SPDK_ERRLOG("data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
			nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
			return;
		}
	}

	switch (pdu->hdr.common.pdu_type) {
	case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
		nvme_tcp_c2h_data_payload_handle(tqpair, pdu, reaped);
		break;

	case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
		nvme_tcp_c2h_term_req_payload_handle(tqpair, pdu);
		break;

	default:
		/* The code should never reach this point */
		SPDK_ERRLOG("Unexpected PDU type in payload handling\n");
		break;
	}
}

static void
nvme_tcp_send_icreq_complete(void *cb_arg)
{
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Complete the icreq send for tqpair=%p\n",
		      (struct nvme_tcp_qpair *)cb_arg);
}

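/*
 * Handle the IC_RESP PDU that answers our IC_REQ: validate pfv, maxh2cdata
 * and cpda, latch the negotiated digest settings, and move the qpair to
 * the RUNNING state.
 */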
static void
nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair,
		       struct nvme_tcp_pdu *pdu)
{
	struct spdk_nvme_tcp_ic_resp *ic_resp = &pdu->hdr.ic_resp;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	/* Only PFV 0 is defined currently */
	if (ic_resp->pfv != 0) {
		SPDK_ERRLOG("Expected ICResp PFV %u, got %u\n", 0u, ic_resp->pfv);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, pfv);
		goto end;
	}

	if (ic_resp->maxh2cdata < NVME_TCP_PDU_H2C_MIN_DATA_SIZE) {
		SPDK_ERRLOG("Expected ICResp maxh2cdata >=%u, got %u\n", NVME_TCP_PDU_H2C_MIN_DATA_SIZE,
			    ic_resp->maxh2cdata);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, maxh2cdata);
		goto end;
	}
	tqpair->maxh2cdata = ic_resp->maxh2cdata;

	if (ic_resp->cpda > SPDK_NVME_TCP_CPDA_MAX) {
		SPDK_ERRLOG("Expected ICResp cpda <=%u, got %u\n", SPDK_NVME_TCP_CPDA_MAX, ic_resp->cpda);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, cpda);
		goto end;
	}
	tqpair->cpda = ic_resp->cpda;

	tqpair->host_hdgst_enable = ic_resp->dgst.bits.hdgst_enable ? true : false;
	tqpair->host_ddgst_enable = ic_resp->dgst.bits.ddgst_enable ? true : false;
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "host_hdgst_enable: %u\n", tqpair->host_hdgst_enable);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "host_ddgst_enable: %u\n", tqpair->host_ddgst_enable);

	tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	return;
end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_capsule_resp_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
				 uint32_t *reaped)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_rsp *capsule_resp = &pdu->hdr.capsule_resp;
	uint32_t cid, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;
	struct spdk_nvme_cpl cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
	cpl = capsule_resp->rccqe;
	cid = cpl.cid;

	/* Prepare to receive the next pdu */
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);

	tcp_req = get_nvme_active_req_by_cid(tqpair, cid);
	if (!tcp_req) {
		SPDK_ERRLOG("no tcp_req is found with cid=%u for tqpair=%p\n", cid, tqpair);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_rsp, rccqe);
		goto end;
	}

	assert(tcp_req->req != NULL);
	assert(tcp_req->state == NVME_TCP_REQ_ACTIVE);
	nvme_tcp_req_complete(tcp_req->req, &cpl);
	nvme_tcp_req_put(tqpair, tcp_req);
	(*reaped)++;
	nvme_tcp_free_and_handle_queued_req(&tqpair->qpair);

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "complete tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair);

	return;

end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_c2h_term_req_hdr_handle(struct nvme_tcp_qpair *tqpair,
				 struct nvme_tcp_pdu *pdu)
{
	struct spdk_nvme_tcp_term_req_hdr *c2h_term_req = &pdu->hdr.term_req;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	if (c2h_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) {
		SPDK_ERRLOG("Fatal Error Status (FES) is unknown for c2h_term_req pdu=%p\n", pdu);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes);
		goto end;
	}

	/* set the data buffer */
	pdu->data = (uint8_t *)pdu->hdr.raw + c2h_term_req->common.hlen;
	pdu->data_len = c2h_term_req->common.plen - c2h_term_req->common.hlen;
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	return;
end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data = &pdu->hdr.c2h_data;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "c2h_data info on tqpair(%p): datao=%u, datal=%u, cccid=%d\n",
		      tqpair, c2h_data->datao, c2h_data->datal, c2h_data->cccid);
	tcp_req = get_nvme_active_req_by_cid(tqpair, c2h_data->cccid);
	if (!tcp_req) {
		SPDK_ERRLOG("no tcp_req found for c2hdata cid=%d\n", c2h_data->cccid);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, cccid);
		goto end;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "tcp_req(%p) on tqpair(%p): datao=%u, payload_size=%u\n",
		      tcp_req, tqpair, tcp_req->datao, tcp_req->req->payload_size);

	if (c2h_data->datal > tcp_req->req->payload_size) {
		SPDK_ERRLOG("Invalid datal for tcp_req(%p), datal(%u) exceeds payload_size(%u)\n",
			    tcp_req, c2h_data->datal, tcp_req->req->payload_size);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
		goto end;
	}

	if (tcp_req->datao != c2h_data->datao) {
		SPDK_ERRLOG("Invalid datao for tcp_req(%p), received datao(%u) != expected datao(%u) in tcp_req\n",
			    tcp_req, c2h_data->datao, tcp_req->datao);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datao);
		goto end;
	}

	if ((c2h_data->datao + c2h_data->datal) > tcp_req->req->payload_size) {
		SPDK_ERRLOG("Invalid data range for tcp_req(%p), received (datao(%u) + datal(%u)) exceeds payload_size(%u)\n",
			    tcp_req, c2h_data->datao, c2h_data->datal, tcp_req->req->payload_size);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datal);
		goto end;
	}

	nvme_tcp_pdu_set_data_buf(pdu, tcp_req);
	pdu->data_len = c2h_data->datal;
	pdu->tcp_req = tcp_req;

	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	return;

end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_qpair_h2c_data_send_complete(void *cb_arg)
{
	struct nvme_tcp_req *tcp_req = cb_arg;

	assert(tcp_req != NULL);

	if (tcp_req->r2tl_remain) {
		spdk_nvme_tcp_send_h2c_data(tcp_req);
	}
}

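/*
 * Send one H2C Data PDU carrying up to maxh2cdata bytes of the payload
 * requested by an R2T. Called repeatedly (via the send-complete callback)
 * until r2tl_remain reaches zero; the final PDU sets the LAST_PDU flag.
 */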
static void
spdk_nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req)
{
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(tcp_req->req->qpair);
	struct nvme_tcp_pdu *rsp_pdu;
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
	uint32_t plen, pdo, alignment;

	rsp_pdu = &tcp_req->send_pdu;
	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
	nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req);
	h2c_data = &rsp_pdu->hdr.h2c_data;

	h2c_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	plen = h2c_data->common.hlen = sizeof(*h2c_data);
	h2c_data->cccid = tcp_req->cid;
	h2c_data->ttag = tcp_req->ttag;
	h2c_data->datao = tcp_req->datao;

	h2c_data->datal = spdk_min(tcp_req->r2tl_remain, tqpair->maxh2cdata);
	rsp_pdu->data_len = h2c_data->datal;
	tcp_req->r2tl_remain -= h2c_data->datal;

	if (tqpair->host_hdgst_enable) {
		h2c_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	rsp_pdu->padding_len = 0;
	pdo = plen;
	if (tqpair->cpda) {
		alignment = (tqpair->cpda + 1) << 2;
		if (alignment > plen) {
			rsp_pdu->padding_len = alignment - plen;
			pdo = plen = alignment;
		}
	}

	h2c_data->common.pdo = pdo;
	plen += h2c_data->datal;
	if (tqpair->host_ddgst_enable) {
		h2c_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	h2c_data->common.plen = plen;
	tcp_req->datao += h2c_data->datal;
	if (!tcp_req->r2tl_remain) {
		tqpair->pending_r2t--;
		assert(tqpair->pending_r2t >= 0);
		tcp_req->state = NVME_TCP_REQ_ACTIVE;
		h2c_data->common.flags |= SPDK_NVME_TCP_H2C_DATA_FLAGS_LAST_PDU;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "h2c_data info: datao=%u, datal=%u, pdu_len=%u for tqpair=%p\n",
		      h2c_data->datao, h2c_data->datal, h2c_data->common.plen, tqpair);

	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_h2c_data_send_complete, tcp_req);
}

static void
nvme_tcp_r2t_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_r2t_hdr *r2t = &pdu->hdr.r2t;
	uint32_t cid, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
	cid = r2t->cccid;
	tcp_req = get_nvme_active_req_by_cid(tqpair, cid);
	if (!tcp_req) {
		SPDK_ERRLOG("Cannot find tcp_req for tqpair=%p\n", tqpair);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, cccid);
		goto end;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "r2t info: r2to=%u, r2tl=%u for tqpair=%p\n", r2t->r2to, r2t->r2tl,
		      tqpair);

	if (tcp_req->state != NVME_TCP_REQ_ACTIVE_R2T) {
		if (tqpair->pending_r2t >= tqpair->max_r2t) {
			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
			SPDK_ERRLOG("Invalid R2T: it exceeds the R2T maximum=%u for tqpair=%p\n", tqpair->max_r2t, tqpair);
			goto end;
		}
		tcp_req->state = NVME_TCP_REQ_ACTIVE_R2T;
		tqpair->pending_r2t++;
	}

	if (tcp_req->datao != r2t->r2to) {
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, r2to);
		goto end;
	}

	if ((r2t->r2tl + r2t->r2to) > tcp_req->req->payload_size) {
		SPDK_ERRLOG("Invalid R2T info for tcp_req=%p: (r2to(%u) + r2tl(%u)) exceeds payload_size(%u)\n",
			    tcp_req, r2t->r2to, r2t->r2tl, tcp_req->req->payload_size);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, r2tl);
		goto end;
	}

	tcp_req->ttag = r2t->ttag;
	tcp_req->r2tl_remain = r2t->r2tl;
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);

	spdk_nvme_tcp_send_h2c_data(tcp_req);
	return;

end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_pdu_psh_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
{
	struct nvme_tcp_pdu *pdu;
	int rc;
	uint32_t crc32c, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	pdu = &tqpair->recv_pdu;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter: pdu type =%u\n", pdu->hdr.common.pdu_type);
	/* check header digest if needed */
	if (pdu->has_hdgst) {
		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
		rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
		if (rc == 0) {
			SPDK_ERRLOG("header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
			nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
			return;
		}
	}

	switch (pdu->hdr.common.pdu_type) {
	case SPDK_NVME_TCP_PDU_TYPE_IC_RESP:
		nvme_tcp_icresp_handle(tqpair, pdu);
		break;
	case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
		nvme_tcp_capsule_resp_hdr_handle(tqpair, pdu, reaped);
		break;
	case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
		nvme_tcp_c2h_data_hdr_handle(tqpair, pdu);
		break;

	case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
		nvme_tcp_c2h_term_req_hdr_handle(tqpair, pdu);
		break;
	case SPDK_NVME_TCP_PDU_TYPE_R2T:
		nvme_tcp_r2t_hdr_handle(tqpair, pdu);
		break;

	default:
		SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr.common.pdu_type);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = 1;
		nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
		break;
	}
}

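/*
 * Incrementally read one PDU from the socket, driven by the qpair's
 * receive state machine: common header -> PDU-specific header (plus
 * header digest and padding) -> payload and data digest. Returns
 * NVME_TCP_PDU_IN_PROGRESS when more socket data is needed, or
 * NVME_TCP_PDU_FATAL after an unrecoverable error.
 */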
static int
nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
{
	int rc = 0;
	struct nvme_tcp_pdu *pdu;
	uint32_t data_len;
	uint8_t psh_len, pdo;
	int8_t padding_len;
	enum nvme_tcp_pdu_recv_state prev_state;

	/* The loop here is to allow for several back-to-back state changes. */
	do {
		prev_state = tqpair->recv_state;
		switch (tqpair->recv_state) {
		/* If in a new state */
		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
			nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
			break;
		/* common header */
		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
			pdu = &tqpair->recv_pdu;
			if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
				rc = nvme_tcp_read_data(tqpair->sock,
							sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
							(uint8_t *)&pdu->hdr.common + pdu->ch_valid_bytes);
				if (rc < 0) {
					nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
					break;
				}
				pdu->ch_valid_bytes += rc;
				if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
					return NVME_TCP_PDU_IN_PROGRESS;
				}
			}

			/* The common header of this PDU has now been read from the socket. */
			nvme_tcp_pdu_ch_handle(tqpair);
			break;
		/* Wait for the pdu specific header */
		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
			pdu = &tqpair->recv_pdu;
			psh_len = pdu->hdr.common.hlen;

			/* The following PDU types can have a header digest */
			if (((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP) ||
			     (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) ||
			     (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_R2T)) &&
			    tqpair->host_hdgst_enable) {
				pdu->has_hdgst = true;
				psh_len += SPDK_NVME_TCP_DIGEST_LEN;
				if (pdu->hdr.common.plen > psh_len) {
					pdo = pdu->hdr.common.pdo;
					padding_len = pdo - psh_len;
					SPDK_DEBUGLOG(SPDK_LOG_NVME, "padding length is =%d for pdu=%p on tqpair=%p\n", padding_len,
						      pdu, tqpair);
					if (padding_len > 0) {
						psh_len = pdo;
					}
				}
			}

			psh_len -= sizeof(struct spdk_nvme_tcp_common_pdu_hdr);
			/* The following will read the psh + hdgst (if present) + padding (if present) */
			if (pdu->psh_valid_bytes < psh_len) {
				rc = nvme_tcp_read_data(tqpair->sock,
							psh_len - pdu->psh_valid_bytes,
							(uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
				if (rc < 0) {
					nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
					break;
				}

				pdu->psh_valid_bytes += rc;
				if (pdu->psh_valid_bytes < psh_len) {
					return NVME_TCP_PDU_IN_PROGRESS;
				}
			}

			/* All headers (ch, psh, header digest) of this PDU have now been read from the socket. */
			nvme_tcp_pdu_psh_handle(tqpair, reaped);
			break;
		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
			pdu = &tqpair->recv_pdu;
			/* check whether the data is valid, if not we just return */
			if (!pdu->data) {
				return NVME_TCP_PDU_IN_PROGRESS;
			}

			data_len = pdu->data_len;
			/* data len */
			if (pdu->data_valid_bytes < data_len) {
				rc = nvme_tcp_read_data(tqpair->sock,
							data_len - pdu->data_valid_bytes,
							(uint8_t *)pdu->data + pdu->data_valid_bytes);
				if (rc < 0) {
					nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
					break;
				}

				pdu->data_valid_bytes += rc;
				if (pdu->data_valid_bytes < data_len) {
					return NVME_TCP_PDU_IN_PROGRESS;
				}
			}

			/* data digest */
			if ((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) &&
			    tqpair->host_ddgst_enable && (pdu->ddigest_valid_bytes < SPDK_NVME_TCP_DIGEST_LEN)) {
				rc = nvme_tcp_read_data(tqpair->sock,
							SPDK_NVME_TCP_DIGEST_LEN - pdu->ddigest_valid_bytes,
							pdu->data_digest + pdu->ddigest_valid_bytes);
				if (rc < 0) {
					nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
					break;
				}

				pdu->ddigest_valid_bytes += rc;
				if (pdu->ddigest_valid_bytes < SPDK_NVME_TCP_DIGEST_LEN) {
					return NVME_TCP_PDU_IN_PROGRESS;
				}
			}

			/* All of this PDU has now been read from the socket. */
			nvme_tcp_pdu_payload_handle(tqpair, reaped);
			break;
		case NVME_TCP_PDU_RECV_STATE_ERROR:
			rc = NVME_TCP_PDU_FATAL;
			break;
		default:
			assert(0);
			break;
		}
	} while (prev_state != tqpair->recv_state);

	return rc;
}

int
nvme_tcp_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
	uint32_t reaped;
	int rc;

	rc = nvme_tcp_qpair_process_send_queue(tqpair);
	if (rc) {
		return 0;
	}

	if (max_completions == 0) {
		max_completions = tqpair->num_entries;
	} else {
		max_completions = spdk_min(max_completions, tqpair->num_entries);
	}

	reaped = 0;
	do {
		rc = nvme_tcp_read_pdu(tqpair, &reaped);
		if (rc < 0) {
			SPDK_ERRLOG("Error polling CQ! (%d): %s\n",
				    errno, spdk_strerror(errno));
			return -1;
		} else if (rc == 0) {
			/* Partial PDU is read */
			break;
		}
	} while (reaped < max_completions);

	return reaped;
}

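/*
 * Send the ICReq PDU that starts connection negotiation and poll until
 * the corresponding ICResp moves the qpair out of the INVALID state.
 * Returns 0 once the qpair is RUNNING, -1 otherwise.
 */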
static int
nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
{
	struct spdk_nvme_tcp_ic_req *ic_req;
	struct nvme_tcp_pdu *pdu;

	pdu = &tqpair->send_pdu;
	memset(&tqpair->send_pdu, 0, sizeof(tqpair->send_pdu));
	ic_req = &pdu->hdr.ic_req;

	ic_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	ic_req->common.hlen = ic_req->common.plen = sizeof(*ic_req);
	ic_req->pfv = 0;
	ic_req->maxr2t = NVME_TCP_MAX_R2T_DEFAULT - 1;
	ic_req->hpda = NVME_TCP_HPDA_DEFAULT;

	ic_req->dgst.bits.hdgst_enable = tqpair->qpair.ctrlr->opts.header_digest;
	ic_req->dgst.bits.ddgst_enable = tqpair->qpair.ctrlr->opts.data_digest;

	nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_send_icreq_complete, tqpair);

	while (tqpair->state == NVME_TCP_QPAIR_STATE_INVALID) {
		nvme_tcp_qpair_process_completions(&tqpair->qpair, 0);
	}

	if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
		SPDK_ERRLOG("Failed to construct the tqpair=%p via correct icresp\n", tqpair);
		return -1;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Successfully constructed the tqpair=%p via correct icresp\n", tqpair);

	return 0;
}

static int
nvme_tcp_qpair_connect(struct nvme_tcp_qpair *tqpair)
{
	struct sockaddr_storage dst_addr;
	struct sockaddr_storage src_addr;
	int rc;
	struct spdk_nvme_ctrlr *ctrlr;
	int family;

	ctrlr = tqpair->qpair.ctrlr;

	switch (ctrlr->trid.adrfam) {
	case SPDK_NVMF_ADRFAM_IPV4:
		family = AF_INET;
		break;
	case SPDK_NVMF_ADRFAM_IPV6:
		family = AF_INET6;
		break;
	default:
		SPDK_ERRLOG("Unhandled ADRFAM %d\n", ctrlr->trid.adrfam);
		return -1;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "adrfam %d ai_family %d\n", ctrlr->trid.adrfam, family);

	memset(&dst_addr, 0, sizeof(dst_addr));

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "trsvcid is %s\n", ctrlr->trid.trsvcid);
	rc = nvme_tcp_parse_addr(&dst_addr, family, ctrlr->trid.traddr, ctrlr->trid.trsvcid);
	if (rc != 0) {
		SPDK_ERRLOG("dst_addr nvme_tcp_parse_addr() failed\n");
		return -1;
	}

	if (ctrlr->opts.src_addr[0] || ctrlr->opts.src_svcid[0]) {
		memset(&src_addr, 0, sizeof(src_addr));
		rc = nvme_tcp_parse_addr(&src_addr, family, ctrlr->opts.src_addr, ctrlr->opts.src_svcid);
		if (rc != 0) {
			SPDK_ERRLOG("src_addr nvme_tcp_parse_addr() failed\n");
			return -1;
		}
	}

	tqpair->sock = spdk_sock_connect(ctrlr->trid.traddr, atoi(ctrlr->trid.trsvcid));
	if (!tqpair->sock) {
		SPDK_ERRLOG("sock connection error of tqpair=%p with addr=%s, port=%d\n",
			    tqpair, ctrlr->trid.traddr, atoi(ctrlr->trid.trsvcid));
		return -1;
	}

	tqpair->max_r2t = NVME_TCP_MAX_R2T_DEFAULT;
	rc = nvme_tcp_alloc_reqs(tqpair);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "rc =%d\n", rc);
	if (rc) {
		SPDK_ERRLOG("Unable to allocate tqpair tcp requests\n");
		return -1;
	}
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "TCP requests allocated\n");

	rc = nvme_tcp_qpair_icreq_send(tqpair);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to connect the tqpair\n");
		return -1;
	}

	rc = nvme_fabric_qpair_connect(&tqpair->qpair, tqpair->num_entries);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
		return -1;
	}

	return 0;
}

static struct spdk_nvme_qpair *
nvme_tcp_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr,
			    uint16_t qid, uint32_t qsize,
			    enum spdk_nvme_qprio qprio,
			    uint32_t num_requests)
{
	struct nvme_tcp_qpair *tqpair;
	struct spdk_nvme_qpair *qpair;
	int rc;

	tqpair = calloc(1, sizeof(struct nvme_tcp_qpair));
	if (!tqpair) {
		SPDK_ERRLOG("failed to allocate tqpair\n");
		return NULL;
	}

	tqpair->num_entries = qsize;
	qpair = &tqpair->qpair;

	rc = nvme_qpair_init(qpair, qid, ctrlr, qprio, num_requests);
	if (rc != 0) {
		free(tqpair);
		return NULL;
	}

	rc = nvme_tcp_qpair_connect(tqpair);
	if (rc < 0) {
		nvme_tcp_qpair_destroy(qpair);
		return NULL;
	}

	return qpair;
}

struct spdk_nvme_qpair *
nvme_tcp_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
			       const struct spdk_nvme_io_qpair_opts *opts)
{
	return nvme_tcp_ctrlr_create_qpair(ctrlr, qid, opts->io_queue_size, opts->qprio,
					   opts->io_queue_requests);
}

struct spdk_nvme_ctrlr *
nvme_tcp_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			 const struct spdk_nvme_ctrlr_opts *opts,
			 void *devhandle)
{
	struct nvme_tcp_ctrlr *tctrlr;
	union spdk_nvme_cap_register cap;
	union spdk_nvme_vs_register vs;
	int rc;

	tctrlr = calloc(1, sizeof(*tctrlr));
	if (tctrlr == NULL) {
		SPDK_ERRLOG("could not allocate ctrlr\n");
		return NULL;
	}

	tctrlr->ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
	tctrlr->ctrlr.opts = *opts;
	tctrlr->ctrlr.trid = *trid;

	rc = nvme_ctrlr_construct(&tctrlr->ctrlr);
	if (rc != 0) {
		free(tctrlr);
		return NULL;
	}

	tctrlr->ctrlr.adminq = nvme_tcp_ctrlr_create_qpair(&tctrlr->ctrlr, 0,
			       SPDK_NVMF_MIN_ADMIN_QUEUE_ENTRIES, 0, SPDK_NVMF_MIN_ADMIN_QUEUE_ENTRIES);
	if (!tctrlr->ctrlr.adminq) {
		SPDK_ERRLOG("failed to create admin qpair\n");
		nvme_tcp_ctrlr_destruct(&tctrlr->ctrlr);
		return NULL;
	}

	if (nvme_ctrlr_get_cap(&tctrlr->ctrlr, &cap)) {
		SPDK_ERRLOG("get_cap() failed\n");
		nvme_ctrlr_destruct(&tctrlr->ctrlr);
		return NULL;
	}

	if (nvme_ctrlr_get_vs(&tctrlr->ctrlr, &vs)) {
		SPDK_ERRLOG("get_vs() failed\n");
		nvme_ctrlr_destruct(&tctrlr->ctrlr);
		return NULL;
	}

	nvme_ctrlr_init_cap(&tctrlr->ctrlr, &cap, &vs);

	return &tctrlr->ctrlr;
}

uint32_t
nvme_tcp_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return NVME_TCP_RW_BUFFER_SIZE;
}

uint16_t
nvme_tcp_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	/*
	 * We do not support >1 SGE in the initiator currently,
	 *  so we can only return 1 here.  Once that support is
	 *  added, this should return ctrlr->cdata.nvmf_specific.msdbd
	 *  instead.
	 */
	return 1;
}

void *
nvme_tcp_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
{
	return NULL;
}

int
nvme_tcp_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
{
	return 0;
}