/* xref: /spdk/lib/nvme/nvme_tcp.c (revision f93b6fb0a4ebcee203e7c44c9e170c20bbce96cc) */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NVMe/TCP transport
 */

#include "nvme_internal.h"

#include "spdk/endian.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "spdk/stdinc.h"
#include "spdk/crc32.h"
#include "spdk/assert.h"
#include "spdk/thread.h"
#include "spdk/trace.h"
#include "spdk/util.h"

#include "spdk_internal/nvme_tcp.h"

#define NVME_TCP_RW_BUFFER_SIZE 131072

#define NVME_TCP_HPDA_DEFAULT			0
#define NVME_TCP_MAX_R2T_DEFAULT		16
#define NVME_TCP_PDU_H2C_MIN_DATA_SIZE		4096
#define NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE	8192

/* NVMe TCP transport extensions for spdk_nvme_ctrlr */
struct nvme_tcp_ctrlr {
	struct spdk_nvme_ctrlr			ctrlr;
};

/* NVMe TCP qpair extensions for spdk_nvme_qpair */
struct nvme_tcp_qpair {
	struct spdk_nvme_qpair			qpair;
	struct spdk_sock			*sock;

	TAILQ_HEAD(, nvme_tcp_req)		free_reqs;
	TAILQ_HEAD(, nvme_tcp_req)		outstanding_reqs;

	TAILQ_HEAD(, nvme_tcp_pdu)		send_queue;
	struct nvme_tcp_pdu			recv_pdu;
	struct nvme_tcp_pdu			send_pdu; /* only for error pdu and init pdu */
	enum nvme_tcp_pdu_recv_state		recv_state;

	struct nvme_tcp_req			*tcp_reqs;

	uint16_t				num_entries;

	bool					host_hdgst_enable;
	bool					host_ddgst_enable;

	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
	uint32_t				maxh2cdata;

	int32_t					max_r2t;
	int32_t					pending_r2t;

	/* 0's based value (CPDA) used to compute PDU padding */
	uint8_t					cpda;

	enum nvme_tcp_qpair_state		state;
};

enum nvme_tcp_req_state {
	NVME_TCP_REQ_FREE,
	NVME_TCP_REQ_ACTIVE,
	NVME_TCP_REQ_ACTIVE_R2T,
};

struct nvme_tcp_req {
	struct nvme_request			*req;
	enum nvme_tcp_req_state			state;
	uint16_t				cid;
	uint16_t				ttag;
	uint32_t				datao;
	uint32_t				r2tl_remain;
	bool					in_capsule_data;
	struct nvme_tcp_pdu			send_pdu;
	struct iovec				iov[NVME_TCP_MAX_SGL_DESCRIPTORS];
	uint32_t				iovcnt;
	TAILQ_ENTRY(nvme_tcp_req)		link;
};

static void spdk_nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req);

static inline struct nvme_tcp_qpair *
nvme_tcp_qpair(struct spdk_nvme_qpair *qpair)
{
	assert(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
	return SPDK_CONTAINEROF(qpair, struct nvme_tcp_qpair, qpair);
}

static inline struct nvme_tcp_ctrlr *
nvme_tcp_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	assert(ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_TCP);
	return SPDK_CONTAINEROF(ctrlr, struct nvme_tcp_ctrlr, ctrlr);
}

static struct nvme_tcp_req *
nvme_tcp_req_get(struct nvme_tcp_qpair *tqpair)
{
	struct nvme_tcp_req *tcp_req;

	tcp_req = TAILQ_FIRST(&tqpair->free_reqs);
	if (!tcp_req) {
		return NULL;
	}

	assert(tcp_req->state == NVME_TCP_REQ_FREE);
	tcp_req->state = NVME_TCP_REQ_ACTIVE;
	TAILQ_REMOVE(&tqpair->free_reqs, tcp_req, link);
	tcp_req->datao = 0;
	tcp_req->req = NULL;
	tcp_req->in_capsule_data = false;
	tcp_req->r2tl_remain = 0;
	tcp_req->iovcnt = 0;
	memset(&tcp_req->send_pdu, 0, sizeof(tcp_req->send_pdu));
	TAILQ_INSERT_TAIL(&tqpair->outstanding_reqs, tcp_req, link);

	return tcp_req;
}

static void
nvme_tcp_req_put(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
{
	assert(tcp_req->state != NVME_TCP_REQ_FREE);
	tcp_req->state = NVME_TCP_REQ_FREE;
	TAILQ_REMOVE(&tqpair->outstanding_reqs, tcp_req, link);
	TAILQ_INSERT_TAIL(&tqpair->free_reqs, tcp_req, link);
}

static int
nvme_tcp_parse_addr(struct sockaddr_storage *sa, int family, const char *addr, const char *service)
{
	struct addrinfo *res;
	struct addrinfo hints;
	int ret;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = family;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_protocol = 0;

	ret = getaddrinfo(addr, service, &hints, &res);
	if (ret) {
		SPDK_ERRLOG("getaddrinfo failed: %s (%d)\n", gai_strerror(ret), ret);
		return ret;
	}

	if (res->ai_addrlen > sizeof(*sa)) {
		SPDK_ERRLOG("getaddrinfo() ai_addrlen %zu too large\n", (size_t)res->ai_addrlen);
		ret = EINVAL;
	} else {
		memcpy(sa, res->ai_addr, res->ai_addrlen);
	}

	freeaddrinfo(res);
	return ret;
}
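
/*
 * Illustrative usage sketch (not part of the driver): resolving a transport
 * address with nvme_tcp_parse_addr().  The address and service values below
 * are hypothetical.
 *
 *	struct sockaddr_storage dst;
 *
 *	if (nvme_tcp_parse_addr(&dst, AF_INET, "192.168.0.10", "4420") != 0) {
 *		... handle the getaddrinfo() error (or EINVAL if oversized) ...
 *	}
 */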

static void
nvme_tcp_free_reqs(struct nvme_tcp_qpair *tqpair)
{
	free(tqpair->tcp_reqs);
	tqpair->tcp_reqs = NULL;
}

static int
nvme_tcp_alloc_reqs(struct nvme_tcp_qpair *tqpair)
{
	int i;
	struct nvme_tcp_req	*tcp_req;

	tqpair->tcp_reqs = calloc(tqpair->num_entries, sizeof(struct nvme_tcp_req));
	if (tqpair->tcp_reqs == NULL) {
		SPDK_ERRLOG("Failed to allocate tcp_reqs\n");
		goto fail;
	}

	TAILQ_INIT(&tqpair->send_queue);
	TAILQ_INIT(&tqpair->free_reqs);
	TAILQ_INIT(&tqpair->outstanding_reqs);
	for (i = 0; i < tqpair->num_entries; i++) {
		tcp_req = &tqpair->tcp_reqs[i];
		tcp_req->cid = i;
		TAILQ_INSERT_TAIL(&tqpair->free_reqs, tcp_req, link);
	}

	return 0;
fail:
	nvme_tcp_free_reqs(tqpair);
	return -ENOMEM;
}

static int
nvme_tcp_qpair_destroy(struct spdk_nvme_qpair *qpair)
{
	struct nvme_tcp_qpair *tqpair;

	if (!qpair) {
		return -1;
	}

	nvme_tcp_qpair_fail(qpair);
	nvme_qpair_deinit(qpair);

	tqpair = nvme_tcp_qpair(qpair);

	nvme_tcp_free_reqs(tqpair);

	spdk_sock_close(&tqpair->sock);
	free(tqpair);

	return 0;
}

int
nvme_tcp_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

/* This function must only be called while holding g_spdk_nvme_driver->lock */
int
nvme_tcp_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
		    bool direct_connect)
{
	struct spdk_nvme_ctrlr_opts discovery_opts;
	struct spdk_nvme_ctrlr *discovery_ctrlr;
	union spdk_nvme_cc_register cc;
	int rc;
	struct nvme_completion_poll_status status;

	if (strcmp(probe_ctx->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN) != 0) {
		/* Not a discovery controller - connect directly. */
		rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
		return rc;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&discovery_opts, sizeof(discovery_opts));
	/* For the discovery controller, set the keep alive timeout to 0 (disabled). */
	discovery_opts.keep_alive_timeout_ms = 0;

	discovery_ctrlr = nvme_tcp_ctrlr_construct(&probe_ctx->trid, &discovery_opts, NULL);
	if (discovery_ctrlr == NULL) {
		return -1;
	}

	/* TODO: this should be using the normal NVMe controller initialization process */
	cc.raw = 0;
	cc.bits.en = 1;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
	rc = nvme_transport_ctrlr_set_reg_4(discovery_ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
					    cc.raw);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to set cc\n");
		nvme_ctrlr_destruct(discovery_ctrlr);
		return -1;
	}

	/* Direct attach through spdk_nvme_connect() API */
	if (direct_connect == true) {
		/* get the cdata info */
		status.done = false;
		rc = nvme_ctrlr_cmd_identify(discovery_ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
					     &discovery_ctrlr->cdata, sizeof(discovery_ctrlr->cdata),
					     nvme_completion_poll_cb, &status);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to identify cdata\n");
			nvme_ctrlr_destruct(discovery_ctrlr);
			return rc;
		}

		if (spdk_nvme_wait_for_completion(discovery_ctrlr->adminq, &status)) {
			SPDK_ERRLOG("nvme_identify_controller failed!\n");
			nvme_ctrlr_destruct(discovery_ctrlr);
			return -ENXIO;
		}
		/* Set the ready state to skip the normal init process */
		discovery_ctrlr->state = NVME_CTRLR_STATE_READY;
		nvme_ctrlr_connected(probe_ctx, discovery_ctrlr);
		nvme_ctrlr_add_process(discovery_ctrlr, 0);
		return 0;
	}

	rc = nvme_fabric_ctrlr_discover(discovery_ctrlr, probe_ctx);
	nvme_ctrlr_destruct(discovery_ctrlr);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "leave\n");
	return rc;
}

int
nvme_tcp_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_tcp_ctrlr *tctrlr = nvme_tcp_ctrlr(ctrlr);

	if (ctrlr->adminq) {
		nvme_tcp_qpair_destroy(ctrlr->adminq);
	}

	nvme_ctrlr_destruct_finish(ctrlr);

	free(tctrlr);

	return 0;
}

int
nvme_tcp_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	return nvme_fabric_ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_tcp_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	return nvme_fabric_ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_tcp_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	return nvme_fabric_ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_tcp_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	return nvme_fabric_ctrlr_get_reg_8(ctrlr, offset, value);
}

static int
nvme_tcp_qpair_process_send_queue(struct nvme_tcp_qpair *tqpair)
{
	const int array_size = 32;
	struct iovec	iovec_array[array_size];
	struct iovec	*iov = iovec_array;
	int iovec_cnt = 0;
	int bytes = 0;
	uint32_t mapped_length;
	struct nvme_tcp_pdu *pdu;
	int pdu_length;
	TAILQ_HEAD(, nvme_tcp_pdu) completed_pdus_list;

	pdu = TAILQ_FIRST(&tqpair->send_queue);

	if (pdu == NULL) {
		return 0;
	}

	/*
	 * Build up a list of iovecs for the first few PDUs in the
	 *  tqpair's send_queue.
	 */
	while (pdu != NULL && ((array_size - iovec_cnt) >= 3)) {
		iovec_cnt += nvme_tcp_build_iovecs(&iovec_array[iovec_cnt], array_size - iovec_cnt,
						   pdu, tqpair->host_hdgst_enable,
						   tqpair->host_ddgst_enable, &mapped_length);
		pdu = TAILQ_NEXT(pdu, tailq);
	}

	bytes = spdk_sock_writev(tqpair->sock, iov, iovec_cnt);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "wrote %d bytes\n", bytes);
	if (bytes == -1) {
		if (errno == EWOULDBLOCK || errno == EAGAIN) {
			return 1;
		} else {
			SPDK_ERRLOG("spdk_sock_writev() failed, errno %d: %s\n",
				    errno, spdk_strerror(errno));
			return -1;
		}
	}

	pdu = TAILQ_FIRST(&tqpair->send_queue);

	/*
	 * Free any PDUs that were fully written.  If a PDU was only
	 *  partially written, update its writev_offset so that next
	 *  time only the unwritten portion will be sent to writev().
	 */
	TAILQ_INIT(&completed_pdus_list);
	while (bytes > 0) {
		pdu_length = pdu->hdr.common.plen - pdu->writev_offset;
		assert(pdu_length > 0);
		if (bytes >= pdu_length) {
			bytes -= pdu_length;
			TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
			TAILQ_INSERT_TAIL(&completed_pdus_list, pdu, tailq);
			pdu = TAILQ_FIRST(&tqpair->send_queue);
		} else {
			pdu->writev_offset += bytes;
			bytes = 0;
		}
	}

	while (!TAILQ_EMPTY(&completed_pdus_list)) {
		pdu = TAILQ_FIRST(&completed_pdus_list);
		TAILQ_REMOVE(&completed_pdus_list, pdu, tailq);
		assert(pdu->cb_fn != NULL);
		pdu->cb_fn(pdu->cb_arg);
	}

	return TAILQ_EMPTY(&tqpair->send_queue) ? 0 : 1;
}
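
/*
 * Worked example of the bookkeeping above, with hypothetical values: if the
 * send queue holds two PDUs of plen 72 and 4168 and spdk_sock_writev()
 * reports 100 bytes written, the first PDU completes (its cb_fn fires) and
 * the second has consumed 28 bytes, so its writev_offset becomes 28; the
 * next call builds iovecs starting at byte 28 of that PDU.
 */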

static int
nvme_tcp_qpair_write_pdu(struct nvme_tcp_qpair *tqpair,
			 struct nvme_tcp_pdu *pdu,
			 nvme_tcp_qpair_xfer_complete_cb cb_fn,
			 void *cb_arg)
{
	int enable_digest;
	int hlen;
	uint32_t crc32c;

	hlen = pdu->hdr.common.hlen;
	enable_digest = 1;
	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ ||
	    pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) {
		/* this PDU should be sent without digest */
		enable_digest = 0;
	}

	/* Header Digest */
	if (enable_digest && tqpair->host_hdgst_enable) {
		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
		MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + hlen, crc32c);
	}

	/* Data Digest */
	if (pdu->data_len > 0 && enable_digest && tqpair->host_ddgst_enable) {
		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
		MAKE_DIGEST_WORD(pdu->data_digest, crc32c);
	}

	pdu->cb_fn = cb_fn;
	pdu->cb_arg = cb_arg;
	TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
	return 0;
}
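
/*
 * Wire layout of a PDU queued by nvme_tcp_qpair_write_pdu():
 *
 *	CH + PSH (hlen bytes) | HDGST (4B, if negotiated) | PAD (up to PDO) |
 *	DATA (data_len bytes, if any) | DDGST (4B, if negotiated)
 *
 * IC_REQ and H2C_TERM_REQ PDUs are always sent without digests, per the
 * enable_digest check above.
 */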

/*
 * Build SGL describing contiguous payload buffer.
 */
static int
nvme_tcp_build_contig_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
{
	struct nvme_request *req = tcp_req->req;

	tcp_req->iov[0].iov_base = req->payload.contig_or_cb_arg + req->payload_offset;
	tcp_req->iov[0].iov_len = req->payload_size;
	tcp_req->iovcnt = 1;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");

	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);

	return 0;
}

/*
 * Build SGL describing scattered payload buffer.
 */
static int
nvme_tcp_build_sgl_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
{
	int rc, iovcnt;
	uint32_t length;
	uint64_t remaining_size;
	struct nvme_request *req = tcp_req->req;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");

	assert(req->payload_size != 0);
	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
	assert(req->payload.reset_sgl_fn != NULL);
	assert(req->payload.next_sge_fn != NULL);
	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);

	remaining_size = req->payload_size;
	iovcnt = 0;

	do {
		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &tcp_req->iov[iovcnt].iov_base,
					      &length);
		if (rc) {
			return -1;
		}

		tcp_req->iov[iovcnt].iov_len = length;
		remaining_size -= length;
		iovcnt++;
	} while (remaining_size > 0 && iovcnt < NVME_TCP_MAX_SGL_DESCRIPTORS);

	/* Should be impossible if we did our sgl checks properly up the stack, but do a sanity check here. */
	if (remaining_size > 0) {
		return -1;
	}

	tcp_req->iovcnt = iovcnt;

	return 0;
}
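
/*
 * Illustrative sketch (not part of the driver): a minimal reset_sgl_fn /
 * next_sge_fn callback pair of the kind consumed above, assuming a
 * hypothetical array-backed scatter list.  A real implementation would also
 * honor the byte offset passed to reset_sgl_fn.
 *
 *	struct my_sgl { struct iovec *iov; int cnt; int idx; };
 *
 *	static void my_reset_sgl(void *cb_arg, uint32_t offset) {
 *		((struct my_sgl *)cb_arg)->idx = 0;
 *	}
 *
 *	static int my_next_sge(void *cb_arg, void **address, uint32_t *length) {
 *		struct my_sgl *s = cb_arg;
 *
 *		if (s->idx == s->cnt) {
 *			return -1;
 *		}
 *		*address = s->iov[s->idx].iov_base;
 *		*length = s->iov[s->idx].iov_len;
 *		s->idx++;
 *		return 0;
 *	}
 */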

static inline uint32_t
nvme_tcp_icdsz_bytes(struct spdk_nvme_ctrlr *ctrlr)
{
	return (ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd));
}
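
/*
 * Worked example: ioccsz is expressed in 16-byte units and covers the whole
 * command capsule.  With a hypothetical ioccsz of 260, the capsule is
 * 260 * 16 = 4160 bytes; subtracting the 64-byte SQE leaves 4096 bytes of
 * in-capsule data.
 */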

static int
nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
		  struct nvme_tcp_req *tcp_req)
{
	struct spdk_nvme_ctrlr *ctrlr = tqpair->qpair.ctrlr;
	int rc = 0;
	enum spdk_nvme_data_transfer xfer;
	uint32_t max_incapsule_data_size;

	tcp_req->req = req;
	req->cmd.cid = tcp_req->cid;
	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	req->cmd.dptr.sgl1.unkeyed.length = req->payload_size;

	if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG) {
		rc = nvme_tcp_build_contig_request(tqpair, tcp_req);
	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL) {
		rc = nvme_tcp_build_sgl_request(tqpair, tcp_req);
	} else {
		rc = -1;
	}

	if (rc) {
		return rc;
	}

	if (req->cmd.opc == SPDK_NVME_OPC_FABRIC) {
		struct spdk_nvmf_capsule_cmd *nvmf_cmd = (struct spdk_nvmf_capsule_cmd *)&req->cmd;

		xfer = spdk_nvme_opc_get_data_transfer(nvmf_cmd->fctype);
	} else {
		xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
	}
	if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		max_incapsule_data_size = nvme_tcp_icdsz_bytes(ctrlr);
		if ((req->cmd.opc == SPDK_NVME_OPC_FABRIC) || nvme_qpair_is_admin_queue(&tqpair->qpair)) {
			max_incapsule_data_size = spdk_min(max_incapsule_data_size, NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE);
		}

		if (req->payload_size <= max_incapsule_data_size) {
			req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
			req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
			req->cmd.dptr.sgl1.address = 0;
			tcp_req->in_capsule_data = true;
		}
	}

	return 0;
}

static void
nvme_tcp_qpair_cmd_send_complete(void *cb_arg)
{
}

static void
nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
			  struct nvme_tcp_req *tcp_req,
			  uint32_t data_len)
{
	uint32_t i, remain_len, len;
	struct _iov_ctx *ctx;

	if (tcp_req->iovcnt == 1) {
		nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)tcp_req->iov[0].iov_base + tcp_req->datao), data_len);
	} else {
		i = 0;
		ctx = &pdu->iov_ctx;
		assert(tcp_req->iovcnt <= NVME_TCP_MAX_SGL_DESCRIPTORS);
		_iov_ctx_init(ctx, pdu->data_iov, tcp_req->iovcnt, tcp_req->datao);
		remain_len = data_len;

		while (remain_len > 0) {
			assert(i < NVME_TCP_MAX_SGL_DESCRIPTORS);
			len = spdk_min(remain_len, tcp_req->iov[i].iov_len);
			remain_len -= len;
			if (!_iov_ctx_set_iov(ctx, tcp_req->iov[i].iov_base, len)) {
				break;
			}
			i++;
		}

		assert(remain_len == 0);
		pdu->data_iovcnt = ctx->iovcnt;
		pdu->data_len = data_len;
	}
}
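
/*
 * Worked example with hypothetical values, assuming _iov_ctx_set_iov()
 * skips the leading iov_offset bytes passed to _iov_ctx_init(): for a
 * request with two 4096-byte iovecs, datao == 4096 and data_len == 2048,
 * the first iovec is skipped entirely and a single data_iov entry covers
 * the first 2048 bytes of the second iovec.
 */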

static int
nvme_tcp_qpair_capsule_cmd_send(struct nvme_tcp_qpair *tqpair,
				struct nvme_tcp_req *tcp_req)
{
	struct nvme_tcp_pdu *pdu;
	struct spdk_nvme_tcp_cmd *capsule_cmd;
	uint32_t plen = 0, alignment;
	uint8_t pdo;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
	pdu = &tcp_req->send_pdu;

	capsule_cmd = &pdu->hdr.capsule_cmd;
	capsule_cmd->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	plen = capsule_cmd->common.hlen = sizeof(*capsule_cmd);
	capsule_cmd->ccsqe = tcp_req->req->cmd;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "capsule_cmd cid=%u on tqpair(%p)\n", tcp_req->req->cmd.cid, tqpair);

	if (tqpair->host_hdgst_enable) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Header digest is enabled for capsule command on tcp_req=%p\n",
			      tcp_req);
		capsule_cmd->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	if ((tcp_req->req->payload_size == 0) || !tcp_req->in_capsule_data) {
		goto end;
	}

	pdo = plen;
	pdu->padding_len = 0;
	if (tqpair->cpda) {
		alignment = (tqpair->cpda + 1) << 2;
		if (alignment > plen) {
			pdu->padding_len = alignment - plen;
			pdo = alignment;
			plen = alignment;
		}
	}

	capsule_cmd->common.pdo = pdo;
	plen += tcp_req->req->payload_size;
	if (tqpair->host_ddgst_enable) {
		capsule_cmd->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	tcp_req->datao = 0;
	nvme_tcp_pdu_set_data_buf(pdu, tcp_req, tcp_req->req->payload_size);
end:
	capsule_cmd->common.plen = plen;
	return nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_qpair_cmd_send_complete, NULL);
}
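
/*
 * Worked example of the CPDA padding above, with hypothetical values: the
 * capsule header is 72 bytes (76 with a header digest).  If the controller
 * advertised cpda = 31, the required alignment is (31 + 1) << 2 = 128
 * bytes, so padding_len = 128 - 76 = 52 and the in-capsule data starts at
 * PDO 128.
 */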

int
nvme_tcp_qpair_submit_request(struct spdk_nvme_qpair *qpair,
			      struct nvme_request *req)
{
	struct nvme_tcp_qpair *tqpair;
	struct nvme_tcp_req *tcp_req;

	tqpair = nvme_tcp_qpair(qpair);
	assert(tqpair != NULL);
	assert(req != NULL);

	tcp_req = nvme_tcp_req_get(tqpair);
	if (!tcp_req) {
		/*
		 * No tcp_req is available.  Queue the request to be processed later.
		 */
		STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		return 0;
	}

	if (nvme_tcp_req_init(tqpair, req, tcp_req)) {
		SPDK_ERRLOG("nvme_tcp_req_init() failed\n");
		nvme_tcp_req_put(tqpair, tcp_req);
		return -1;
	}

	return nvme_tcp_qpair_capsule_cmd_send(tqpair, tcp_req);
}

int
nvme_tcp_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	return nvme_tcp_qpair_destroy(qpair);
}

int
nvme_tcp_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	return -1;
}

int
nvme_tcp_qpair_enable(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_tcp_qpair_disable(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_tcp_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

static void
nvme_tcp_req_complete(struct nvme_request *req,
		      struct spdk_nvme_cpl *rsp)
{
	nvme_complete_request(req->cb_fn, req->cb_arg, req, rsp);
	nvme_free_request(req);
}

int
nvme_tcp_qpair_fail(struct spdk_nvme_qpair *qpair)
{
	/*
	 * If the qpair has truly failed, the connection is broken
	 * and we need to fail back all outstanding I/O.
	 */
	struct nvme_tcp_req *tcp_req, *tmp;
	struct nvme_request *req;
	struct spdk_nvme_cpl cpl = {};
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);

	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
	cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
		assert(tcp_req->req != NULL);
		req = tcp_req->req;

		nvme_tcp_req_complete(req, &cpl);
		nvme_tcp_req_put(tqpair, tcp_req);
	}

	return 0;
}

static void
nvme_tcp_qpair_set_recv_state(struct nvme_tcp_qpair *tqpair,
			      enum nvme_tcp_pdu_recv_state state)
{
	if (tqpair->recv_state == state) {
		SPDK_ERRLOG("The recv state of tqpair=%p is already %d\n",
			    tqpair, state);
		return;
	}

	tqpair->recv_state = state;
	switch (state) {
	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
	case NVME_TCP_PDU_RECV_STATE_ERROR:
		memset(&tqpair->recv_pdu, 0, sizeof(struct nvme_tcp_pdu));
		break;
	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
	default:
		break;
	}
}

static void
nvme_tcp_qpair_send_h2c_term_req_complete(void *cb_arg)
{
	struct nvme_tcp_qpair *tqpair = cb_arg;

	tqpair->state = NVME_TCP_QPAIR_STATE_EXITING;
}

static void
nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
				 enum spdk_nvme_tcp_term_req_fes fes, uint32_t error_offset)
{
	struct nvme_tcp_pdu *rsp_pdu;
	struct spdk_nvme_tcp_term_req_hdr *h2c_term_req;
	uint32_t h2c_term_req_hdr_len = sizeof(*h2c_term_req);
	uint8_t copy_len;

	rsp_pdu = &tqpair->send_pdu;
	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
	h2c_term_req = &rsp_pdu->hdr.term_req;
	h2c_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	h2c_term_req->common.hlen = h2c_term_req_hdr_len;

	if ((fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
	    (fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
		DSET32(&h2c_term_req->fei, error_offset);
	}

	copy_len = pdu->hdr.common.hlen;
	if (copy_len > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE) {
		copy_len = SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE;
	}

	/* Copy the error info into the buffer */
	memcpy((uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len, pdu->hdr.raw, copy_len);
	nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len, copy_len);

	/* plen includes the copied header of the offending PDU */
	h2c_term_req->common.plen = h2c_term_req->common.hlen + copy_len;
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_send_h2c_term_req_complete, NULL);
}

static void
nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
{
	struct nvme_tcp_pdu *pdu;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;
	uint32_t expected_hlen, hd_len = 0;
	bool plen_error = false;

	pdu = &tqpair->recv_pdu;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "pdu type = %d\n", pdu->hdr.common.pdu_type);
	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP) {
		if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
			SPDK_ERRLOG("Already received IC_RESP PDU, and we should reject this pdu=%p\n", pdu);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
			goto err;
		}
		expected_hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
		if (pdu->hdr.common.plen != expected_hlen) {
			plen_error = true;
		}
	} else {
		if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
			SPDK_ERRLOG("The TCP/IP tqpair connection is not negotiated\n");
			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
			goto err;
		}

		switch (pdu->hdr.common.pdu_type) {
		case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
			expected_hlen = sizeof(struct spdk_nvme_tcp_rsp);
			if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
				hd_len = SPDK_NVME_TCP_DIGEST_LEN;
			}

			if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
				plen_error = true;
			}
			break;
		case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
			expected_hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
			if (pdu->hdr.common.plen < pdu->hdr.common.pdo) {
				plen_error = true;
			}
			break;
		case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
			expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
			if ((pdu->hdr.common.plen <= expected_hlen) ||
			    (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
				plen_error = true;
			}
			break;
		case SPDK_NVME_TCP_PDU_TYPE_R2T:
			expected_hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
			if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
				hd_len = SPDK_NVME_TCP_DIGEST_LEN;
			}

			if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
				plen_error = true;
			}
			break;

		default:
			SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr.common.pdu_type);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
			error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type);
			goto err;
		}
	}

	if (pdu->hdr.common.hlen != expected_hlen) {
		SPDK_ERRLOG("Expected PDU header length %u, got %u\n",
			    expected_hlen, pdu->hdr.common.hlen);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen);
		goto err;
	} else if (plen_error) {
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen);
		goto err;
	} else {
		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
		return;
	}
err:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static struct nvme_tcp_req *
get_nvme_active_req_by_cid(struct nvme_tcp_qpair *tqpair, uint32_t cid)
{
	assert(tqpair != NULL);
	if ((cid >= tqpair->num_entries) || (tqpair->tcp_reqs[cid].state == NVME_TCP_REQ_FREE)) {
		return NULL;
	}

	return &tqpair->tcp_reqs[cid];
}

static void
nvme_tcp_free_and_handle_queued_req(struct spdk_nvme_qpair *qpair)
{
	struct nvme_request *req;

	if (!STAILQ_EMPTY(&qpair->queued_req) && !qpair->ctrlr->is_resetting) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		nvme_qpair_submit_request(qpair, req);
	}
}

static void
nvme_tcp_c2h_data_payload_handle(struct nvme_tcp_qpair *tqpair,
				 struct nvme_tcp_pdu *pdu, uint32_t *reaped)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
	struct spdk_nvme_cpl cpl = {};
	uint8_t flags;

	tcp_req = pdu->ctx;
	assert(tcp_req != NULL);

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
	c2h_data = &pdu->hdr.c2h_data;
	tcp_req->datao += pdu->data_len;
	flags = c2h_data->common.flags;

	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	if (flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) {
		if (tcp_req->datao == tcp_req->req->payload_size) {
			cpl.status.p = 0;
		} else {
			cpl.status.p = 1;
		}

		cpl.cid = tcp_req->cid;
		cpl.sqid = tqpair->qpair.id;
		nvme_tcp_req_complete(tcp_req->req, &cpl);
		nvme_tcp_req_put(tqpair, tcp_req);
		(*reaped)++;
		nvme_tcp_free_and_handle_queued_req(&tqpair->qpair);
	}
}

static const char *spdk_nvme_tcp_term_req_fes_str[] = {
	"Invalid PDU Header Field",
	"PDU Sequence Error",
	"Header Digest Error",
	"Data Transfer Out of Range",
	"Data Transfer Limit Exceeded",
	"Unsupported Parameter",
};

static void
nvme_tcp_c2h_term_req_dump(struct spdk_nvme_tcp_term_req_hdr *c2h_term_req)
{
	SPDK_ERRLOG("Error info of pdu(%p): %s\n", c2h_term_req,
		    spdk_nvme_tcp_term_req_fes_str[c2h_term_req->fes]);
	if ((c2h_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
	    (c2h_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "The offset from the start of the PDU header is %u\n",
			      DGET32(c2h_term_req->fei));
	}
	/* we may also need to dump some other info here */
}

static void
nvme_tcp_c2h_term_req_payload_handle(struct nvme_tcp_qpair *tqpair,
				     struct nvme_tcp_pdu *pdu)
{
	nvme_tcp_c2h_term_req_dump(&pdu->hdr.term_req);
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
}

static void
nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
			    uint32_t *reaped)
{
	int rc = 0;
	struct nvme_tcp_pdu *pdu;
	uint32_t crc32c, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	pdu = &tqpair->recv_pdu;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");

	/* Check the data digest, if enabled. */
	if (pdu->ddgst_enable) {
		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
		rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
		if (rc == 0) {
			SPDK_ERRLOG("data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
			nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
			return;
		}
	}

	switch (pdu->hdr.common.pdu_type) {
	case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
		nvme_tcp_c2h_data_payload_handle(tqpair, pdu, reaped);
		break;

	case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
		nvme_tcp_c2h_term_req_payload_handle(tqpair, pdu);
		break;

	default:
		/* Only data-bearing PDU types can reach the payload state. */
		SPDK_ERRLOG("Unexpected PDU type 0x%02x in payload state\n", pdu->hdr.common.pdu_type);
		break;
	}
}

static void
nvme_tcp_send_icreq_complete(void *cb_arg)
{
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Complete the icreq send for tqpair=%p\n",
		      (struct nvme_tcp_qpair *)cb_arg);
}

static void
nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair,
		       struct nvme_tcp_pdu *pdu)
{
	struct spdk_nvme_tcp_ic_resp *ic_resp = &pdu->hdr.ic_resp;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	/* Only PFV 0 is defined currently */
	if (ic_resp->pfv != 0) {
		SPDK_ERRLOG("Expected ICResp PFV %u, got %u\n", 0u, ic_resp->pfv);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, pfv);
		goto end;
	}

	if (ic_resp->maxh2cdata < NVME_TCP_PDU_H2C_MIN_DATA_SIZE) {
		SPDK_ERRLOG("Expected ICResp maxh2cdata >=%u, got %u\n", NVME_TCP_PDU_H2C_MIN_DATA_SIZE,
			    ic_resp->maxh2cdata);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, maxh2cdata);
		goto end;
	}
	tqpair->maxh2cdata = ic_resp->maxh2cdata;

	if (ic_resp->cpda > SPDK_NVME_TCP_CPDA_MAX) {
		SPDK_ERRLOG("Expected ICResp cpda <=%u, got %u\n", SPDK_NVME_TCP_CPDA_MAX, ic_resp->cpda);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, cpda);
		goto end;
	}
	tqpair->cpda = ic_resp->cpda;

	tqpair->host_hdgst_enable = ic_resp->dgst.bits.hdgst_enable ? true : false;
	tqpair->host_ddgst_enable = ic_resp->dgst.bits.ddgst_enable ? true : false;
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "host_hdgst_enable: %u\n", tqpair->host_hdgst_enable);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "host_ddgst_enable: %u\n", tqpair->host_ddgst_enable);

	tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	return;
end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}
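
/*
 * After a valid ICResp, the connection parameters are fixed for the life of
 * the qpair: maxh2cdata bounds every H2C Data PDU, cpda drives the padding
 * computed in spdk_nvme_tcp_send_h2c_data(), and the digest flags reflect
 * what the controller accepted from our ICReq proposal.
 */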

static void
nvme_tcp_capsule_resp_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
				 uint32_t *reaped)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_rsp *capsule_resp = &pdu->hdr.capsule_resp;
	uint32_t cid, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;
	struct spdk_nvme_cpl cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
	cpl = capsule_resp->rccqe;
	cid = cpl.cid;

	/* Get ready to receive the next PDU. */
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);

	tcp_req = get_nvme_active_req_by_cid(tqpair, cid);
	if (!tcp_req) {
		SPDK_ERRLOG("No tcp_req found with cid=%u for tqpair=%p\n", cid, tqpair);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_rsp, rccqe);
		goto end;
	}

	assert(tcp_req->req != NULL);
	assert(tcp_req->state == NVME_TCP_REQ_ACTIVE);
	nvme_tcp_req_complete(tcp_req->req, &cpl);
	nvme_tcp_req_put(tqpair, tcp_req);
	(*reaped)++;
	nvme_tcp_free_and_handle_queued_req(&tqpair->qpair);

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "complete tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair);

	return;

end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_c2h_term_req_hdr_handle(struct nvme_tcp_qpair *tqpair,
				 struct nvme_tcp_pdu *pdu)
{
	struct spdk_nvme_tcp_term_req_hdr *c2h_term_req = &pdu->hdr.term_req;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	if (c2h_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) {
		SPDK_ERRLOG("Fatal Error Status (FES) is unknown for c2h_term_req pdu=%p\n", pdu);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes);
		goto end;
	}

	/* set the data buffer */
	nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + c2h_term_req->common.hlen,
			      c2h_term_req->common.plen - c2h_term_req->common.hlen);
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	return;
end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data = &pdu->hdr.c2h_data;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "c2h_data info on tqpair(%p): datao=%u, datal=%u, cccid=%d\n",
		      tqpair, c2h_data->datao, c2h_data->datal, c2h_data->cccid);
	tcp_req = get_nvme_active_req_by_cid(tqpair, c2h_data->cccid);
	if (!tcp_req) {
		SPDK_ERRLOG("no tcp_req found for c2hdata cid=%d\n", c2h_data->cccid);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, cccid);
		goto end;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "tcp_req(%p) on tqpair(%p): datao=%u, payload_size=%u\n",
		      tcp_req, tqpair, tcp_req->datao, tcp_req->req->payload_size);

	if (c2h_data->datal > tcp_req->req->payload_size) {
		SPDK_ERRLOG("Invalid datal for tcp_req(%p), datal(%u) exceeds payload_size(%u)\n",
			    tcp_req, c2h_data->datal, tcp_req->req->payload_size);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
		goto end;
	}

	if (tcp_req->datao != c2h_data->datao) {
		SPDK_ERRLOG("Invalid datao for tcp_req(%p), received datao(%u) != expected datao(%u) in tcp_req\n",
			    tcp_req, c2h_data->datao, tcp_req->datao);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datao);
		goto end;
	}

	if ((c2h_data->datao + c2h_data->datal) > tcp_req->req->payload_size) {
		SPDK_ERRLOG("Invalid data range for tcp_req(%p), received (datao(%u) + datal(%u)) > payload_size(%u)\n",
			    tcp_req, c2h_data->datao, c2h_data->datal, tcp_req->req->payload_size);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datal);
		goto end;
	}

	nvme_tcp_pdu_set_data_buf(pdu, tcp_req, c2h_data->datal);
	pdu->ctx = tcp_req;

	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	return;

end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_qpair_h2c_data_send_complete(void *cb_arg)
{
	struct nvme_tcp_req *tcp_req = cb_arg;

	assert(tcp_req != NULL);

	if (tcp_req->r2tl_remain) {
		spdk_nvme_tcp_send_h2c_data(tcp_req);
	}
}

static void
spdk_nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req)
{
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(tcp_req->req->qpair);
	struct nvme_tcp_pdu *rsp_pdu;
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
	uint32_t plen, pdo, alignment;

	rsp_pdu = &tcp_req->send_pdu;
	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
	h2c_data = &rsp_pdu->hdr.h2c_data;

	h2c_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	plen = h2c_data->common.hlen = sizeof(*h2c_data);
	h2c_data->cccid = tcp_req->cid;
	h2c_data->ttag = tcp_req->ttag;
	h2c_data->datao = tcp_req->datao;

	h2c_data->datal = spdk_min(tcp_req->r2tl_remain, tqpair->maxh2cdata);
	nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req, h2c_data->datal);
	tcp_req->r2tl_remain -= h2c_data->datal;

	if (tqpair->host_hdgst_enable) {
		h2c_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	rsp_pdu->padding_len = 0;
	pdo = plen;
	if (tqpair->cpda) {
		alignment = (tqpair->cpda + 1) << 2;
		if (alignment > plen) {
			rsp_pdu->padding_len = alignment - plen;
			pdo = plen = alignment;
		}
	}

	h2c_data->common.pdo = pdo;
	plen += h2c_data->datal;
	if (tqpair->host_ddgst_enable) {
		h2c_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	h2c_data->common.plen = plen;
	tcp_req->datao += h2c_data->datal;
	if (!tcp_req->r2tl_remain) {
		tqpair->pending_r2t--;
		assert(tqpair->pending_r2t >= 0);
		tcp_req->state = NVME_TCP_REQ_ACTIVE;
		h2c_data->common.flags |= SPDK_NVME_TCP_H2C_DATA_FLAGS_LAST_PDU;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "h2c_data info: datao=%u, datal=%u, pdu_len=%u for tqpair=%p\n",
		      h2c_data->datao, h2c_data->datal, h2c_data->common.plen, tqpair);

	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_h2c_data_send_complete, tcp_req);
}
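
/*
 * Worked example with hypothetical values: for r2tl = 96 KiB and a
 * negotiated maxh2cdata of 32 KiB, this function runs three times.  The
 * first two H2C Data PDUs carry 32 KiB each (re-driven from
 * nvme_tcp_qpair_h2c_data_send_complete() while r2tl_remain > 0); the third
 * carries the final 32 KiB with LAST_PDU set and pending_r2t decremented.
 */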

static void
nvme_tcp_r2t_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_r2t_hdr *r2t = &pdu->hdr.r2t;
	uint32_t cid, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
	cid = r2t->cccid;
	tcp_req = get_nvme_active_req_by_cid(tqpair, cid);
	if (!tcp_req) {
		SPDK_ERRLOG("Cannot find tcp_req for tqpair=%p\n", tqpair);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, cccid);
		goto end;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "r2t info: r2to=%u, r2tl=%u for tqpair=%p\n", r2t->r2to, r2t->r2tl,
		      tqpair);

	if (tcp_req->state != NVME_TCP_REQ_ACTIVE_R2T) {
		if (tqpair->pending_r2t >= tqpair->max_r2t) {
			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
			SPDK_ERRLOG("Invalid R2T: it exceeds the R2T maximum=%d for tqpair=%p\n", tqpair->max_r2t, tqpair);
			goto end;
		}
		tcp_req->state = NVME_TCP_REQ_ACTIVE_R2T;
		tqpair->pending_r2t++;
	}

	if (tcp_req->datao != r2t->r2to) {
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, r2to);
		goto end;
	}

	if ((r2t->r2tl + r2t->r2to) > tcp_req->req->payload_size) {
		SPDK_ERRLOG("Invalid R2T info for tcp_req=%p: (r2to(%u) + r2tl(%u)) exceeds payload_size(%u)\n",
			    tcp_req, r2t->r2to, r2t->r2tl, tcp_req->req->payload_size);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, r2tl);
		goto end;
	}

	tcp_req->ttag = r2t->ttag;
	tcp_req->r2tl_remain = r2t->r2tl;
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);

	spdk_nvme_tcp_send_h2c_data(tcp_req);
	return;

end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_pdu_psh_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
{
	struct nvme_tcp_pdu *pdu;
	int rc;
	uint32_t crc32c, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	pdu = &tqpair->recv_pdu;

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter: pdu type =%u\n", pdu->hdr.common.pdu_type);
	/* check header digest if needed */
	if (pdu->has_hdgst) {
		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
		rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
		if (rc == 0) {
			SPDK_ERRLOG("header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
			nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
			return;
		}
	}

	switch (pdu->hdr.common.pdu_type) {
	case SPDK_NVME_TCP_PDU_TYPE_IC_RESP:
		nvme_tcp_icresp_handle(tqpair, pdu);
		break;
	case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
		nvme_tcp_capsule_resp_hdr_handle(tqpair, pdu, reaped);
		break;
	case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
		nvme_tcp_c2h_data_hdr_handle(tqpair, pdu);
		break;
	case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
		nvme_tcp_c2h_term_req_hdr_handle(tqpair, pdu);
		break;
	case SPDK_NVME_TCP_PDU_TYPE_R2T:
		nvme_tcp_r2t_hdr_handle(tqpair, pdu);
		break;

	default:
		SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr.common.pdu_type);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = 1;
		nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
		break;
	}
}

static int
nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
{
	int rc = 0;
	struct nvme_tcp_pdu *pdu;
	uint32_t data_len;
	uint8_t psh_len, pdo;
	int8_t padding_len;
	enum nvme_tcp_pdu_recv_state prev_state;

	/* The loop here is to allow for several back-to-back state changes. */
	do {
		prev_state = tqpair->recv_state;
		switch (tqpair->recv_state) {
		/* If in a new state */
		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
			nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
			break;
		/* common header */
		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
			pdu = &tqpair->recv_pdu;
			if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
				rc = nvme_tcp_read_data(tqpair->sock,
							sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
							(uint8_t *)&pdu->hdr.common + pdu->ch_valid_bytes);
				if (rc < 0) {
					nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
					break;
				}
				pdu->ch_valid_bytes += rc;
				if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
					return NVME_TCP_PDU_IN_PROGRESS;
				}
			}

			/* The common header of this PDU has now been read from the socket. */
			nvme_tcp_pdu_ch_handle(tqpair);
			break;
		/* Wait for the PDU-specific header */
		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
			pdu = &tqpair->recv_pdu;
			psh_len = pdu->hdr.common.hlen;

			/* Only the following PDU types can carry a header digest. */
			if (((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP) ||
			     (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) ||
			     (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_R2T)) &&
			    tqpair->host_hdgst_enable) {
				pdu->has_hdgst = true;
				psh_len += SPDK_NVME_TCP_DIGEST_LEN;
				if (pdu->hdr.common.plen > psh_len) {
					pdo = pdu->hdr.common.pdo;
					padding_len = pdo - psh_len;
					SPDK_DEBUGLOG(SPDK_LOG_NVME, "padding length is =%d for pdu=%p on tqpair=%p\n", padding_len,
						      pdu, tqpair);
					if (padding_len > 0) {
						psh_len = pdo;
					}
				}
			}

			psh_len -= sizeof(struct spdk_nvme_tcp_common_pdu_hdr);
			/* The following will read the PSH + header digest (if present) + padding (if present). */
			if (pdu->psh_valid_bytes < psh_len) {
				rc = nvme_tcp_read_data(tqpair->sock,
							psh_len - pdu->psh_valid_bytes,
							(uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
				if (rc < 0) {
					nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
					break;
				}

				pdu->psh_valid_bytes += rc;
				if (pdu->psh_valid_bytes < psh_len) {
					return NVME_TCP_PDU_IN_PROGRESS;
				}
			}

			/* The entire header (CH, PSH, and header digest) of this PDU has now been read from the socket. */
			nvme_tcp_pdu_psh_handle(tqpair, reaped);
			break;
		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
			pdu = &tqpair->recv_pdu;
			/* check whether the data is valid, if not we just return */
			if (!pdu->data_len) {
				return NVME_TCP_PDU_IN_PROGRESS;
			}

			data_len = pdu->data_len;
			/* data digest */
			if (spdk_unlikely((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) &&
					  tqpair->host_ddgst_enable)) {
				data_len += SPDK_NVME_TCP_DIGEST_LEN;
				pdu->ddgst_enable = true;
			}

			rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
			if (rc < 0) {
				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
				break;
			}

			pdu->readv_offset += rc;
			if (pdu->readv_offset < data_len) {
				return NVME_TCP_PDU_IN_PROGRESS;
			}

			assert(pdu->readv_offset == data_len);
			/* All of this PDU has now been read from the socket. */
			nvme_tcp_pdu_payload_handle(tqpair, reaped);
			break;
		case NVME_TCP_PDU_RECV_STATE_ERROR:
			rc = NVME_TCP_PDU_FATAL;
			break;
		default:
			assert(0);
			break;
		}
	} while (prev_state != tqpair->recv_state);

	return rc;
}
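
/*
 * Receive state machine summary:
 *
 *	AWAIT_PDU_READY -> AWAIT_PDU_CH      start of a new PDU
 *	AWAIT_PDU_CH    -> AWAIT_PDU_PSH     8-byte common header read
 *	AWAIT_PDU_PSH   -> AWAIT_PDU_PAYLOAD PDU-specific header read
 *	                                     (data-bearing PDUs only)
 *	any state       -> ERROR             read failure or protocol error
 *
 * A short read returns NVME_TCP_PDU_IN_PROGRESS and the next poll resumes
 * from the same state.
 */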

static void
nvme_tcp_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
{
	uint64_t t02;
	struct nvme_tcp_req *tcp_req, *tmp;
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	struct spdk_nvme_ctrlr_process *active_proc;

	/* Don't check timeouts during controller initialization. */
	if (ctrlr->state != NVME_CTRLR_STATE_READY) {
		return;
	}

	if (nvme_qpair_is_admin_queue(qpair)) {
		active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
	} else {
		active_proc = qpair->active_proc;
	}

	/* Only check timeouts if the current process has a timeout callback. */
	if (active_proc == NULL || active_proc->timeout_cb_fn == NULL) {
		return;
	}

	t02 = spdk_get_ticks();
	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
		assert(tcp_req->req != NULL);

		if (nvme_request_check_timeout(tcp_req->req, tcp_req->cid, active_proc, t02)) {
			/*
			 * The requests are in order, so as soon as one has not timed out,
			 * stop iterating.
			 */
			break;
		}
	}
}

int
nvme_tcp_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
	uint32_t reaped;
	int rc;

	rc = nvme_tcp_qpair_process_send_queue(tqpair);
	if (rc) {
		return 0;
	}

	if (max_completions == 0) {
		max_completions = tqpair->num_entries;
	} else {
		max_completions = spdk_min(max_completions, tqpair->num_entries);
	}

	reaped = 0;
	do {
		rc = nvme_tcp_read_pdu(tqpair, &reaped);
		if (rc < 0) {
			SPDK_ERRLOG("Error polling CQ! (%d): %s\n",
				    errno, spdk_strerror(errno));
			return -1;
		} else if (rc == 0) {
			/* Partial PDU is read */
			break;
		}
	} while (reaped < max_completions);

	if (spdk_unlikely(tqpair->qpair.ctrlr->timeout_enabled)) {
		nvme_tcp_qpair_check_timeout(qpair);
	}

	return reaped;
}

static int
nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
{
	struct spdk_nvme_tcp_ic_req *ic_req;
	struct nvme_tcp_pdu *pdu;

	pdu = &tqpair->send_pdu;
	memset(&tqpair->send_pdu, 0, sizeof(tqpair->send_pdu));
	ic_req = &pdu->hdr.ic_req;

	ic_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	ic_req->common.hlen = ic_req->common.plen = sizeof(*ic_req);
	ic_req->pfv = 0;
	ic_req->maxr2t = NVME_TCP_MAX_R2T_DEFAULT - 1;
	ic_req->hpda = NVME_TCP_HPDA_DEFAULT;

	ic_req->dgst.bits.hdgst_enable = tqpair->qpair.ctrlr->opts.header_digest;
	ic_req->dgst.bits.ddgst_enable = tqpair->qpair.ctrlr->opts.data_digest;

	nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_send_icreq_complete, tqpair);

	while (tqpair->state == NVME_TCP_QPAIR_STATE_INVALID) {
		nvme_tcp_qpair_process_completions(&tqpair->qpair, 0);
	}

	if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
		SPDK_ERRLOG("Failed to construct tqpair=%p: no valid ICResp received\n", tqpair);
		return -1;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Successfully constructed tqpair=%p via ICResp\n", tqpair);

	return 0;
}
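
/*
 * Note on the ICReq fields above: maxr2t is a 0's based value on the wire,
 * so NVME_TCP_MAX_R2T_DEFAULT - 1 advertises support for up to 16
 * outstanding R2Ts, matching the tqpair->max_r2t accounting enforced in
 * nvme_tcp_r2t_hdr_handle().
 */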

static int
nvme_tcp_qpair_connect(struct nvme_tcp_qpair *tqpair)
{
	struct sockaddr_storage dst_addr;
	struct sockaddr_storage src_addr;
	int rc;
	struct spdk_nvme_ctrlr *ctrlr;
	int family;
	long int port;

	ctrlr = tqpair->qpair.ctrlr;

	switch (ctrlr->trid.adrfam) {
	case SPDK_NVMF_ADRFAM_IPV4:
		family = AF_INET;
		break;
	case SPDK_NVMF_ADRFAM_IPV6:
		family = AF_INET6;
		break;
	default:
		SPDK_ERRLOG("Unhandled ADRFAM %d\n", ctrlr->trid.adrfam);
		return -1;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "adrfam %d ai_family %d\n", ctrlr->trid.adrfam, family);

	memset(&dst_addr, 0, sizeof(dst_addr));

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "trsvcid is %s\n", ctrlr->trid.trsvcid);
	rc = nvme_tcp_parse_addr(&dst_addr, family, ctrlr->trid.traddr, ctrlr->trid.trsvcid);
	if (rc != 0) {
		SPDK_ERRLOG("dst_addr nvme_tcp_parse_addr() failed\n");
		return -1;
	}

	if (ctrlr->opts.src_addr[0] || ctrlr->opts.src_svcid[0]) {
		memset(&src_addr, 0, sizeof(src_addr));
		rc = nvme_tcp_parse_addr(&src_addr, family, ctrlr->opts.src_addr, ctrlr->opts.src_svcid);
		if (rc != 0) {
			SPDK_ERRLOG("src_addr nvme_tcp_parse_addr() failed\n");
			return -1;
		}
	}

	port = spdk_strtol(ctrlr->trid.trsvcid, 10);
	if (port <= 0 || port >= INT_MAX) {
		SPDK_ERRLOG("Invalid port: %s\n", ctrlr->trid.trsvcid);
		return -1;
	}

	tqpair->sock = spdk_sock_connect(ctrlr->trid.traddr, port);
	if (!tqpair->sock) {
		SPDK_ERRLOG("sock connection error of tqpair=%p with addr=%s, port=%ld\n",
			    tqpair, ctrlr->trid.traddr, port);
		return -1;
	}

	tqpair->max_r2t = NVME_TCP_MAX_R2T_DEFAULT;
	rc = nvme_tcp_alloc_reqs(tqpair);
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "rc =%d\n", rc);
	if (rc) {
		SPDK_ERRLOG("Unable to allocate tqpair tcp requests\n");
		return -1;
	}
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "TCP requests allocated\n");

	rc = nvme_tcp_qpair_icreq_send(tqpair);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to connect the tqpair\n");
		return -1;
	}

	rc = nvme_fabric_qpair_connect(&tqpair->qpair, tqpair->num_entries);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
		return -1;
	}

	return 0;
}

static struct spdk_nvme_qpair *
nvme_tcp_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr,
			    uint16_t qid, uint32_t qsize,
			    enum spdk_nvme_qprio qprio,
			    uint32_t num_requests)
{
	struct nvme_tcp_qpair *tqpair;
	struct spdk_nvme_qpair *qpair;
	int rc;

	tqpair = calloc(1, sizeof(struct nvme_tcp_qpair));
	if (!tqpair) {
		SPDK_ERRLOG("failed to allocate tqpair\n");
		return NULL;
	}

	tqpair->num_entries = qsize;
	qpair = &tqpair->qpair;

	rc = nvme_qpair_init(qpair, qid, ctrlr, qprio, num_requests);
	if (rc != 0) {
		free(tqpair);
		return NULL;
	}

	rc = nvme_tcp_qpair_connect(tqpair);
	if (rc < 0) {
		nvme_tcp_qpair_destroy(qpair);
		return NULL;
	}

	return qpair;
}

struct spdk_nvme_qpair *
nvme_tcp_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
			       const struct spdk_nvme_io_qpair_opts *opts)
{
	return nvme_tcp_ctrlr_create_qpair(ctrlr, qid, opts->io_queue_size, opts->qprio,
					   opts->io_queue_requests);
}

struct spdk_nvme_ctrlr *nvme_tcp_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	struct nvme_tcp_ctrlr *tctrlr;
	union spdk_nvme_cap_register cap;
	union spdk_nvme_vs_register vs;
	int rc;

	tctrlr = calloc(1, sizeof(*tctrlr));
	if (tctrlr == NULL) {
		SPDK_ERRLOG("could not allocate ctrlr\n");
		return NULL;
	}

	tctrlr->ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
	tctrlr->ctrlr.opts = *opts;
	tctrlr->ctrlr.trid = *trid;

	rc = nvme_ctrlr_construct(&tctrlr->ctrlr);
	if (rc != 0) {
		free(tctrlr);
		return NULL;
	}

	tctrlr->ctrlr.adminq = nvme_tcp_ctrlr_create_qpair(&tctrlr->ctrlr, 0,
			       SPDK_NVMF_MIN_ADMIN_QUEUE_ENTRIES, 0, SPDK_NVMF_MIN_ADMIN_QUEUE_ENTRIES);
	if (!tctrlr->ctrlr.adminq) {
		SPDK_ERRLOG("failed to create admin qpair\n");
		nvme_tcp_ctrlr_destruct(&tctrlr->ctrlr);
		return NULL;
	}

	if (nvme_ctrlr_get_cap(&tctrlr->ctrlr, &cap)) {
		SPDK_ERRLOG("get_cap() failed\n");
		nvme_ctrlr_destruct(&tctrlr->ctrlr);
		return NULL;
	}

	if (nvme_ctrlr_get_vs(&tctrlr->ctrlr, &vs)) {
		SPDK_ERRLOG("get_vs() failed\n");
		nvme_ctrlr_destruct(&tctrlr->ctrlr);
		return NULL;
	}

	if (nvme_ctrlr_add_process(&tctrlr->ctrlr, 0) != 0) {
		SPDK_ERRLOG("nvme_ctrlr_add_process() failed\n");
		nvme_ctrlr_destruct(&tctrlr->ctrlr);
		return NULL;
	}

	nvme_ctrlr_init_cap(&tctrlr->ctrlr, &cap, &vs);

	return &tctrlr->ctrlr;
}

uint32_t
nvme_tcp_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return NVME_TCP_RW_BUFFER_SIZE;
}

uint16_t
nvme_tcp_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	/*
	 * We do not support >1 SGE in the initiator currently,
	 *  so we can only return 1 here.  Once that support is
	 *  added, this should return ctrlr->cdata.nvmf_specific.msdbd
	 *  instead.
	 */
	return 1;
}

void *
nvme_tcp_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
{
	return NULL;
}

int
nvme_tcp_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
{
	return 0;
}