xref: /spdk/lib/nvme/nvme_tcp.c (revision fa2d95b3fe66e7f5c543eaef89fa00d4eaa0e6e7)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe/TCP transport
36  */
37 
38 #include "nvme_internal.h"
39 
40 #include "spdk/endian.h"
41 #include "spdk/likely.h"
42 #include "spdk/string.h"
43 #include "spdk/stdinc.h"
44 #include "spdk/crc32.h"
46 #include "spdk/assert.h"
48 #include "spdk/thread.h"
49 #include "spdk/trace.h"
50 #include "spdk/util.h"
51 
52 #include "spdk_internal/nvme_tcp.h"
53 
54 #define NVME_TCP_RW_BUFFER_SIZE 131072
55 
56 /*
57  * Maximum number of SGL elements.
58  * This is chosen to match the current nvme_pcie.c limit.
59  */
60 #define NVME_TCP_MAX_SGL_DESCRIPTORS	(253)
61 
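/*
 * Defaults and limits used by this transport (as applied by the code below):
 *  - NVME_TCP_HPDA_DEFAULT: host PDU data alignment advertised in the ICReq (no alignment).
 *  - NVME_TCP_MAX_R2T_DEFAULT: number of R2Ts the host is willing to have outstanding;
 *    the MAXR2T field in the ICReq is 0-based, so (value - 1) goes on the wire.
 *  - NVME_TCP_PDU_H2C_MIN_DATA_SIZE: the smallest MAXH2CDATA value this host accepts
 *    in the controller's ICResp.
 *  - NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE: upper bound on in-capsule data used for fabrics
 *    and admin commands.
 */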
62 #define NVME_TCP_HPDA_DEFAULT			0
63 #define NVME_TCP_MAX_R2T_DEFAULT		16
64 #define NVME_TCP_PDU_H2C_MIN_DATA_SIZE		4096
65 #define NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE	8192
66 
67 /* NVMe TCP transport extensions for spdk_nvme_ctrlr */
68 struct nvme_tcp_ctrlr {
69 	struct spdk_nvme_ctrlr			ctrlr;
70 };
71 
72 /* NVMe TCP qpair extensions for spdk_nvme_qpair */
73 struct nvme_tcp_qpair {
74 	struct spdk_nvme_qpair			qpair;
75 	struct spdk_sock			*sock;
76 
77 	TAILQ_HEAD(, nvme_tcp_req)		free_reqs;
78 	TAILQ_HEAD(, nvme_tcp_req)		outstanding_reqs;
79 
80 	TAILQ_HEAD(, nvme_tcp_pdu)		send_queue;
81 	struct nvme_tcp_pdu			recv_pdu;
82 	struct nvme_tcp_pdu			send_pdu; /* only for error pdu and init pdu */
83 	enum nvme_tcp_pdu_recv_state		recv_state;
84 
85 	struct nvme_tcp_req			*tcp_reqs;
86 
87 	uint16_t				num_entries;
88 
89 	bool					host_hdgst_enable;
90 	bool					host_ddgst_enable;
91 
92 	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
93 	uint32_t				maxh2cdata;
94 
95 	int32_t					max_r2t;
96 	int32_t					pending_r2t;
97 
98 	/* 0-based CPDA value from the ICResp, used to compute PDU data alignment padding */
99 	uint8_t					cpda;
100 
101 	enum nvme_tcp_qpair_state		state;
102 };
103 
104 enum nvme_tcp_req_state {
105 	NVME_TCP_REQ_FREE,
106 	NVME_TCP_REQ_ACTIVE,
107 	NVME_TCP_REQ_ACTIVE_R2T,
108 };
109 
110 struct nvme_tcp_req {
111 	struct nvme_request			*req;
112 	enum nvme_tcp_req_state			state;
113 	uint16_t				cid;
114 	uint16_t				ttag;
115 	uint32_t				datao;
116 	uint32_t				r2tl_remain;
117 	bool					in_capsule_data;
118 	struct nvme_tcp_pdu			send_pdu;
119 	void					*buf;
120 	TAILQ_ENTRY(nvme_tcp_req)		link;
121 };
122 
123 static void spdk_nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req);
124 
125 static inline struct nvme_tcp_qpair *
126 nvme_tcp_qpair(struct spdk_nvme_qpair *qpair)
127 {
128 	assert(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
129 	return SPDK_CONTAINEROF(qpair, struct nvme_tcp_qpair, qpair);
130 }
131 
132 static inline struct nvme_tcp_ctrlr *
133 nvme_tcp_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
134 {
135 	assert(ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_TCP);
136 	return SPDK_CONTAINEROF(ctrlr, struct nvme_tcp_ctrlr, ctrlr);
137 }
138 
139 static struct nvme_tcp_req *
140 nvme_tcp_req_get(struct nvme_tcp_qpair *tqpair)
141 {
142 	struct nvme_tcp_req *tcp_req;
143 
144 	tcp_req = TAILQ_FIRST(&tqpair->free_reqs);
145 	if (!tcp_req) {
146 		return NULL;
147 	}
148 
149 	assert(tcp_req->state == NVME_TCP_REQ_FREE);
150 	tcp_req->state = NVME_TCP_REQ_ACTIVE;
151 	TAILQ_REMOVE(&tqpair->free_reqs, tcp_req, link);
152 	tcp_req->datao = 0;
153 	tcp_req->req = NULL;
154 	tcp_req->in_capsule_data = false;
155 	tcp_req->r2tl_remain = 0;
156 	tcp_req->buf = NULL;
157 	memset(&tcp_req->send_pdu, 0, sizeof(tcp_req->send_pdu));
158 	TAILQ_INSERT_TAIL(&tqpair->outstanding_reqs, tcp_req, link);
159 
160 	return tcp_req;
161 }
162 
163 static void
164 nvme_tcp_req_put(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
165 {
166 	assert(tcp_req->state != NVME_TCP_REQ_FREE);
167 	tcp_req->state = NVME_TCP_REQ_FREE;
168 	TAILQ_REMOVE(&tqpair->outstanding_reqs, tcp_req, link);
169 	TAILQ_INSERT_TAIL(&tqpair->free_reqs, tcp_req, link);
170 }
171 
172 static int
173 nvme_tcp_parse_addr(struct sockaddr_storage *sa, int family, const char *addr, const char *service)
174 {
175 	struct addrinfo *res;
176 	struct addrinfo hints;
177 	int ret;
178 
179 	memset(&hints, 0, sizeof(hints));
180 	hints.ai_family = family;
181 	hints.ai_socktype = SOCK_STREAM;
182 	hints.ai_protocol = 0;
183 
184 	ret = getaddrinfo(addr, service, &hints, &res);
185 	if (ret) {
186 		SPDK_ERRLOG("getaddrinfo failed: %s (%d)\n", gai_strerror(ret), ret);
187 		return ret;
188 	}
189 
190 	if (res->ai_addrlen > sizeof(*sa)) {
191 		SPDK_ERRLOG("getaddrinfo() ai_addrlen %zu too large\n", (size_t)res->ai_addrlen);
192 		ret = EINVAL;
193 	} else {
194 		memcpy(sa, res->ai_addr, res->ai_addrlen);
195 	}
196 
197 	freeaddrinfo(res);
198 	return ret;
199 }
200 
201 static void
202 nvme_tcp_free_reqs(struct nvme_tcp_qpair *tqpair)
203 {
204 	free(tqpair->tcp_reqs);
205 	tqpair->tcp_reqs = NULL;
206 }
207 
208 static int
209 nvme_tcp_alloc_reqs(struct nvme_tcp_qpair *tqpair)
210 {
211 	int i;
212 	struct nvme_tcp_req	*tcp_req;
213 
214 	tqpair->tcp_reqs = calloc(tqpair->num_entries, sizeof(struct nvme_tcp_req));
215 	if (tqpair->tcp_reqs == NULL) {
216 		SPDK_ERRLOG("Failed to allocate tcp_reqs\n");
217 		goto fail;
218 	}
219 
220 	TAILQ_INIT(&tqpair->send_queue);
221 	TAILQ_INIT(&tqpair->free_reqs);
222 	TAILQ_INIT(&tqpair->outstanding_reqs);
223 	for (i = 0; i < tqpair->num_entries; i++) {
224 		tcp_req = &tqpair->tcp_reqs[i];
225 		tcp_req->cid = i;
226 		TAILQ_INSERT_TAIL(&tqpair->free_reqs, tcp_req, link);
227 	}
228 
229 	return 0;
230 fail:
231 	nvme_tcp_free_reqs(tqpair);
232 	return -ENOMEM;
233 }
234 
235 static int
236 nvme_tcp_qpair_destroy(struct spdk_nvme_qpair *qpair)
237 {
238 	struct nvme_tcp_qpair *tqpair;
239 
240 	if (!qpair) {
241 		return -1;
242 	}
243 
244 	nvme_tcp_qpair_fail(qpair);
245 	nvme_qpair_deinit(qpair);
246 
247 	tqpair = nvme_tcp_qpair(qpair);
248 
249 	nvme_tcp_free_reqs(tqpair);
250 
251 	spdk_sock_close(&tqpair->sock);
252 	free(tqpair);
253 
254 	return 0;
255 }
256 
257 int
258 nvme_tcp_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
259 {
260 	return 0;
261 }
262 
263 /* This function must only be called while holding g_spdk_nvme_driver->lock */
264 int
265 nvme_tcp_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
266 		    bool direct_connect)
267 {
268 	struct spdk_nvme_ctrlr_opts discovery_opts;
269 	struct spdk_nvme_ctrlr *discovery_ctrlr;
270 	union spdk_nvme_cc_register cc;
271 	int rc;
272 	struct nvme_completion_poll_status status;
273 
274 	if (strcmp(probe_ctx->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN) != 0) {
275 		/* Not a discovery controller - connect directly. */
276 		rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
277 		return rc;
278 	}
279 
280 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&discovery_opts, sizeof(discovery_opts));
281 	/* For discovery_ctrlr set the timeout to 0 */
282 	discovery_opts.keep_alive_timeout_ms = 0;
283 
284 	discovery_ctrlr = nvme_tcp_ctrlr_construct(&probe_ctx->trid, &discovery_opts, NULL);
285 	if (discovery_ctrlr == NULL) {
286 		return -1;
287 	}
288 
289 	/* TODO: this should be using the normal NVMe controller initialization process */
290 	cc.raw = 0;
291 	cc.bits.en = 1;
292 	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
293 	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
294 	rc = nvme_transport_ctrlr_set_reg_4(discovery_ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
295 					    cc.raw);
296 	if (rc < 0) {
297 		SPDK_ERRLOG("Failed to set cc\n");
298 		nvme_ctrlr_destruct(discovery_ctrlr);
299 		return -1;
300 	}
301 
302 	/* Direct attach through spdk_nvme_connect() API */
303 	if (direct_connect == true) {
304 		/* get the cdata info */
305 		status.done = false;
306 		rc = nvme_ctrlr_cmd_identify(discovery_ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
307 					     &discovery_ctrlr->cdata, sizeof(discovery_ctrlr->cdata),
308 					     nvme_completion_poll_cb, &status);
309 		if (rc != 0) {
310 			SPDK_ERRLOG("Failed to identify cdata\n");
311 			return rc;
312 		}
313 
314 		if (spdk_nvme_wait_for_completion(discovery_ctrlr->adminq, &status)) {
315 			SPDK_ERRLOG("nvme_identify_controller failed!\n");
316 			return -ENXIO;
317 		}
318 		/* Set the ready state to skip the normal init process */
319 		discovery_ctrlr->state = NVME_CTRLR_STATE_READY;
320 		nvme_ctrlr_connected(probe_ctx, discovery_ctrlr);
321 		nvme_ctrlr_add_process(discovery_ctrlr, 0);
322 		return 0;
323 	}
324 
325 	rc = nvme_fabric_ctrlr_discover(discovery_ctrlr, probe_ctx);
326 	nvme_ctrlr_destruct(discovery_ctrlr);
327 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "leave\n");
328 	return rc;
329 }
330 
331 int
332 nvme_tcp_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
333 {
334 	struct nvme_tcp_ctrlr *tctrlr = nvme_tcp_ctrlr(ctrlr);
335 
336 	if (ctrlr->adminq) {
337 		nvme_tcp_qpair_destroy(ctrlr->adminq);
338 	}
339 
340 	nvme_ctrlr_destruct_finish(ctrlr);
341 
342 	free(tctrlr);
343 
344 	return 0;
345 }
346 
347 int
348 nvme_tcp_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
349 {
350 	return nvme_fabric_ctrlr_set_reg_4(ctrlr, offset, value);
351 }
352 
353 int
354 nvme_tcp_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
355 {
356 	return nvme_fabric_ctrlr_set_reg_8(ctrlr, offset, value);
357 }
358 
359 int
360 nvme_tcp_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
361 {
362 	return nvme_fabric_ctrlr_get_reg_4(ctrlr, offset, value);
363 }
364 
365 int
366 nvme_tcp_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
367 {
368 	return nvme_fabric_ctrlr_get_reg_8(ctrlr, offset, value);
369 }
370 
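/*
 * Flush the qpair's send_queue: gather iovecs for the first few queued PDUs,
 * write them with a single spdk_sock_writev(), invoke the completion callback
 * for every PDU that was fully written, and record a partial write via
 * writev_offset.  Returns 0 when the queue is empty, 1 when PDUs remain
 * queued, and -1 on a fatal socket error.
 */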
371 static int
372 nvme_tcp_qpair_process_send_queue(struct nvme_tcp_qpair *tqpair)
373 {
374 	const int array_size = 32;
375 	struct iovec	iovec_array[array_size];
376 	struct iovec	*iov = iovec_array;
377 	int iovec_cnt = 0;
378 	int bytes = 0;
379 	uint32_t mapped_length;
380 	struct nvme_tcp_pdu *pdu;
381 	int pdu_length;
382 	TAILQ_HEAD(, nvme_tcp_pdu) completed_pdus_list;
383 
384 	pdu = TAILQ_FIRST(&tqpair->send_queue);
385 
386 	if (pdu == NULL) {
387 		return 0;
388 	}
389 
390 	/*
391 	 * Build up a list of iovecs for the first few PDUs in the
392 	 *  tqpair's send_queue.
393 	 */
394 	while (pdu != NULL && ((array_size - iovec_cnt) >= 3)) {
395 		iovec_cnt += nvme_tcp_build_iovecs(&iovec_array[iovec_cnt], array_size - iovec_cnt,
396 						   pdu, tqpair->host_hdgst_enable,
397 						   tqpair->host_ddgst_enable, &mapped_length);
398 		pdu = TAILQ_NEXT(pdu, tailq);
399 	}
400 
401 	bytes = spdk_sock_writev(tqpair->sock, iov, iovec_cnt);
402 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "wrote %d bytes to the socket\n", bytes);
403 	if (bytes == -1) {
404 		if (errno == EWOULDBLOCK || errno == EAGAIN) {
405 			return 1;
406 		} else {
407 			SPDK_ERRLOG("spdk_sock_writev() failed, errno %d: %s\n",
408 				    errno, spdk_strerror(errno));
409 			return -1;
410 		}
411 	}
412 
413 	pdu = TAILQ_FIRST(&tqpair->send_queue);
414 
415 	/*
416 	 * Free any PDUs that were fully written.  If a PDU was only
417 	 *  partially written, update its writev_offset so that next
418 	 *  time only the unwritten portion will be sent to writev().
419 	 */
420 	TAILQ_INIT(&completed_pdus_list);
421 	while (bytes > 0) {
422 		pdu_length = pdu->hdr.common.plen - pdu->writev_offset;
423 		assert(pdu_length > 0);
424 		if (bytes >= pdu_length) {
425 			bytes -= pdu_length;
426 			TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
427 			TAILQ_INSERT_TAIL(&completed_pdus_list, pdu, tailq);
428 			pdu = TAILQ_FIRST(&tqpair->send_queue);
429 
430 		} else {
431 			pdu->writev_offset += bytes;
432 			bytes = 0;
433 		}
434 	}
435 
436 	while (!TAILQ_EMPTY(&completed_pdus_list)) {
437 		pdu = TAILQ_FIRST(&completed_pdus_list);
438 		TAILQ_REMOVE(&completed_pdus_list, pdu, tailq);
439 		assert(pdu->cb_fn != NULL);
440 		pdu->cb_fn(pdu->cb_arg);
441 	}
442 
443 	return TAILQ_EMPTY(&tqpair->send_queue) ? 0 : 1;
444 
445 }
446 
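/*
 * Queue a PDU for transmission.  Header and data digests are computed here
 * (when enabled and applicable for the PDU type); the PDU is then appended to
 * the send_queue and actually written to the socket later by
 * nvme_tcp_qpair_process_send_queue().
 */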
447 static int
448 nvme_tcp_qpair_write_pdu(struct nvme_tcp_qpair *tqpair,
449 			 struct nvme_tcp_pdu *pdu,
450 			 nvme_tcp_qpair_xfer_complete_cb cb_fn,
451 			 void *cb_arg)
452 {
453 	int enable_digest;
454 	int hlen;
455 	uint32_t crc32c;
456 
457 	hlen = pdu->hdr.common.hlen;
458 	enable_digest = 1;
459 	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ ||
460 	    pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) {
461 		/* this PDU should be sent without digest */
462 		enable_digest = 0;
463 	}
464 
465 	/* Header Digest */
466 	if (enable_digest && tqpair->host_hdgst_enable) {
467 		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
468 		MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + hlen, crc32c);
469 	}
470 
471 	/* Data Digest */
472 	if (pdu->data_len > 0 && enable_digest && tqpair->host_ddgst_enable) {
473 		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
474 		MAKE_DIGEST_WORD(pdu->data_digest, crc32c);
475 	}
476 
477 	pdu->cb_fn = cb_fn;
478 	pdu->cb_arg = cb_arg;
479 	TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
480 	return 0;
481 }
482 
483 /*
484  * Build SGL describing contiguous payload buffer.
485  */
486 static int
487 nvme_tcp_build_contig_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
488 {
489 	struct nvme_request *req = tcp_req->req;
490 	tcp_req->buf = req->payload.contig_or_cb_arg + req->payload_offset;
491 
492 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
493 
494 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
495 
496 	return 0;
497 }
498 
499 /*
500  * Build SGL describing scattered payload buffer.
501  */
502 static int
503 nvme_tcp_build_sgl_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
504 {
505 	int rc;
506 	uint32_t length;
507 	struct nvme_request *req = tcp_req->req;
508 
509 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
510 
511 	assert(req->payload_size != 0);
512 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
513 	assert(req->payload.reset_sgl_fn != NULL);
514 	assert(req->payload.next_sge_fn != NULL);
515 	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
516 
517 	/* TODO: for now, we only support a single SGL entry */
518 	rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &tcp_req->buf, &length);
519 
520 	if (rc) {
521 		return -1;
522 	}
523 
524 	if (length < req->payload_size) {
525 		SPDK_ERRLOG("multi-element SGLs are not currently supported for TCP\n");
526 		return -1;
527 	}
528 
529 	return 0;
530 }
531 
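/*
 * In-capsule data size advertised by the controller: IOCCSZ is in 16-byte
 * units and includes the 64-byte SQE, so subtract the command size.  Note
 * that this calculation assumes an ICDOFF of 0.
 */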
532 static inline uint32_t
533 nvme_tcp_icdsz_bytes(struct spdk_nvme_ctrlr *ctrlr)
534 {
535 	return (ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd));
536 }
537 
538 static int
539 nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
540 		  struct nvme_tcp_req *tcp_req)
541 {
542 	struct spdk_nvme_ctrlr *ctrlr = tqpair->qpair.ctrlr;
543 	int rc = 0;
544 	enum spdk_nvme_data_transfer xfer;
545 	uint32_t max_incapsule_data_size;
546 
547 	tcp_req->req = req;
548 	req->cmd.cid = tcp_req->cid;
549 	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
550 	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
551 	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
552 	req->cmd.dptr.sgl1.unkeyed.length = req->payload_size;
553 
554 	if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG) {
555 		rc = nvme_tcp_build_contig_request(tqpair, tcp_req);
556 	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL) {
557 		rc = nvme_tcp_build_sgl_request(tqpair, tcp_req);
558 	} else {
559 		rc = -1;
560 	}
561 
562 	if (rc) {
563 		return rc;
564 	}
565 
566 	if (req->cmd.opc == SPDK_NVME_OPC_FABRIC) {
567 		struct spdk_nvmf_capsule_cmd *nvmf_cmd = (struct spdk_nvmf_capsule_cmd *)&req->cmd;
568 
569 		xfer = spdk_nvme_opc_get_data_transfer(nvmf_cmd->fctype);
570 	} else {
571 		xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
572 	}
573 	if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
574 		max_incapsule_data_size = nvme_tcp_icdsz_bytes(ctrlr);
575 		if ((req->cmd.opc == SPDK_NVME_OPC_FABRIC) || nvme_qpair_is_admin_queue(&tqpair->qpair)) {
576 			max_incapsule_data_size = spdk_min(max_incapsule_data_size, NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE);
577 		}
578 
579 		if (req->payload_size <= max_incapsule_data_size) {
580 			req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
581 			req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
582 			req->cmd.dptr.sgl1.address = 0;
583 			tcp_req->in_capsule_data = true;
584 		}
585 	}
586 
587 	return 0;
588 }
589 
590 static void
591 nvme_tcp_qpair_cmd_send_complete(void *cb_arg)
592 {
593 }
594 
595 static void
596 nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
597 			  struct nvme_tcp_req *tcp_req)
598 {
599 	pdu->data = (uint8_t *)tcp_req->buf + tcp_req->datao;
600 }
601 
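/*
 * Build and queue a CapsuleCmd PDU for the request.  The PDU layout is the
 * common header + SQE, optionally followed by a header digest, padding to the
 * controller's CPDA alignment, in-capsule data, and a data digest.  Data is
 * only carried in the capsule when the request was marked in_capsule_data by
 * nvme_tcp_req_init(); otherwise any host-to-controller data is sent later in
 * H2C Data PDUs in response to R2Ts.
 */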
602 static int
603 nvme_tcp_qpair_capsule_cmd_send(struct nvme_tcp_qpair *tqpair,
604 				struct nvme_tcp_req *tcp_req)
605 {
606 	struct nvme_tcp_pdu *pdu;
607 	struct spdk_nvme_tcp_cmd *capsule_cmd;
608 	uint32_t plen = 0, alignment;
609 	uint8_t pdo;
610 
611 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
612 	pdu = &tcp_req->send_pdu;
613 
614 	capsule_cmd = &pdu->hdr.capsule_cmd;
615 	capsule_cmd->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
616 	plen = capsule_cmd->common.hlen = sizeof(*capsule_cmd);
617 	capsule_cmd->ccsqe = tcp_req->req->cmd;
618 
619 
620 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "capsule_cmd cid=%u on tqpair(%p)\n", tcp_req->req->cmd.cid, tqpair);
621 
622 	if (tqpair->host_hdgst_enable) {
623 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Header digest is enabled for capsule command on tcp_req=%p\n",
624 			      tcp_req);
625 		capsule_cmd->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
626 		plen += SPDK_NVME_TCP_DIGEST_LEN;
627 	}
628 
629 	if ((tcp_req->req->payload_size == 0) || !tcp_req->in_capsule_data) {
630 		goto end;
631 	}
632 
633 	pdo = plen;
634 	pdu->padding_len = 0;
635 	if (tqpair->cpda) {
636 		alignment = (tqpair->cpda + 1) << 2;
637 		if (alignment > plen) {
638 			pdu->padding_len = alignment - plen;
639 			pdo = alignment;
640 			plen = alignment;
641 		}
642 	}
643 
644 	capsule_cmd->common.pdo = pdo;
645 	plen += tcp_req->req->payload_size;
646 	if (tqpair->host_ddgst_enable) {
647 		capsule_cmd->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
648 		plen += SPDK_NVME_TCP_DIGEST_LEN;
649 	}
650 
651 	tcp_req->datao = 0;
652 	nvme_tcp_pdu_set_data_buf(pdu, tcp_req);
653 	pdu->data_len = tcp_req->req->payload_size;
654 
655 end:
656 	capsule_cmd->common.plen = plen;
657 	return nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_qpair_cmd_send_complete, NULL);
658 
659 }
660 
661 int
662 nvme_tcp_qpair_submit_request(struct spdk_nvme_qpair *qpair,
663 			      struct nvme_request *req)
664 {
665 	struct nvme_tcp_qpair *tqpair;
666 	struct nvme_tcp_req *tcp_req;
667 
668 	tqpair = nvme_tcp_qpair(qpair);
669 	assert(tqpair != NULL);
670 	assert(req != NULL);
671 
672 	tcp_req = nvme_tcp_req_get(tqpair);
673 	if (!tcp_req) {
674 		/*
675 		 * No tcp_req is available.  Queue the request to be processed later.
676 		 */
677 		STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
678 		return 0;
679 	}
680 
681 	if (nvme_tcp_req_init(tqpair, req, tcp_req)) {
682 		SPDK_ERRLOG("nvme_tcp_req_init() failed\n");
683 		nvme_tcp_req_put(tqpair, tcp_req);
684 		return -1;
685 	}
686 
687 	return nvme_tcp_qpair_capsule_cmd_send(tqpair, tcp_req);
688 }
689 
690 int
691 nvme_tcp_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
692 {
693 	return nvme_tcp_qpair_destroy(qpair);
694 }
695 
696 int
697 nvme_tcp_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
698 {
699 	return -1;
700 }
701 
702 int
703 nvme_tcp_qpair_enable(struct spdk_nvme_qpair *qpair)
704 {
705 	return 0;
706 }
707 
708 int
709 nvme_tcp_qpair_disable(struct spdk_nvme_qpair *qpair)
710 {
711 	return 0;
712 }
713 
714 int
715 nvme_tcp_qpair_reset(struct spdk_nvme_qpair *qpair)
716 {
717 	return 0;
718 }
719 
720 static void
721 nvme_tcp_req_complete(struct nvme_request *req,
722 		      struct spdk_nvme_cpl *rsp)
723 {
724 	nvme_complete_request(req, rsp);
725 	nvme_free_request(req);
726 }
727 
728 int
729 nvme_tcp_qpair_fail(struct spdk_nvme_qpair *qpair)
730 {
731 	/*
732 	 * If the qpair is really failed, the connection is broken
733 	 * and we need to flush back all I/O
734 	 */
735 	struct nvme_tcp_req *tcp_req, *tmp;
736 	struct nvme_request *req;
737 	struct spdk_nvme_cpl cpl;
738 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
739 
740 	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
741 	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
742 
743 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
744 		assert(tcp_req->req != NULL);
745 		req = tcp_req->req;
746 
747 		nvme_tcp_req_complete(req, &cpl);
748 		nvme_tcp_req_put(tqpair, tcp_req);
749 	}
750 
751 	return 0;
752 }
753 
754 static void
755 nvme_tcp_qpair_set_recv_state(struct nvme_tcp_qpair *tqpair,
756 			      enum nvme_tcp_pdu_recv_state state)
757 {
758 	if (tqpair->recv_state == state) {
759 		SPDK_ERRLOG("The recv state of tqpair=%p is already set to state %d\n",
760 			    tqpair, state);
761 		return;
762 	}
763 
764 	tqpair->recv_state = state;
765 	switch (state) {
766 	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
767 	case NVME_TCP_PDU_RECV_STATE_ERROR:
768 		memset(&tqpair->recv_pdu, 0, sizeof(struct nvme_tcp_pdu));
769 		break;
770 	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
771 	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
772 	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
773 	default:
774 		break;
775 	}
776 }
777 
778 static void
779 nvme_tcp_qpair_send_h2c_term_req_complete(void *cb_arg)
780 {
781 	struct nvme_tcp_qpair *tqpair = cb_arg;
782 
783 	tqpair->state = NVME_TCP_QPAIR_STATE_EXITING;
784 }
785 
786 static void
787 nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
788 				 enum spdk_nvme_tcp_term_req_fes fes, uint32_t error_offset)
789 {
790 	struct nvme_tcp_pdu *rsp_pdu;
791 	struct spdk_nvme_tcp_term_req_hdr *h2c_term_req;
792 	uint32_t h2c_term_req_hdr_len = sizeof(*h2c_term_req);
793 	uint8_t copy_len;
794 
795 	rsp_pdu = &tqpair->send_pdu;
796 	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
797 	h2c_term_req = &rsp_pdu->hdr.term_req;
798 	h2c_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
799 	h2c_term_req->common.hlen = h2c_term_req_hdr_len;
800 
801 	if ((fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
802 	    (fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
803 		DSET32(&h2c_term_req->fei, error_offset);
804 	}
805 
806 	rsp_pdu->data = (uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len;
807 
808 	copy_len = pdu->hdr.common.hlen;
809 	if (copy_len > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE) {
810 		copy_len = SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE;
811 	}
812 
813 	/* Copy the error info into the buffer */
814 	memcpy((uint8_t *)rsp_pdu->data, pdu->hdr.raw, copy_len);
815 	rsp_pdu->data_len = copy_len;
816 
817 	/* The plen accounts for the header of the offending PDU copied above */
818 	h2c_term_req->common.plen = h2c_term_req->common.hlen + copy_len;
819 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
820 	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_send_h2c_term_req_complete, NULL);
821 
822 }
823 
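/*
 * Validate the common header of a newly received PDU: check that the PDU type
 * is legal for the current qpair state and that hlen/plen are consistent with
 * that type.  On success the state machine advances to reading the
 * PDU-specific header; on failure an H2C termination request is sent.
 */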
824 static void
825 nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
826 {
827 	struct nvme_tcp_pdu *pdu;
828 	uint32_t error_offset = 0;
829 	enum spdk_nvme_tcp_term_req_fes fes;
830 	uint32_t expected_hlen, hd_len = 0;
831 	bool plen_error = false;
832 
833 	pdu = &tqpair->recv_pdu;
834 
835 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "pdu type = %d\n", pdu->hdr.common.pdu_type);
836 	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP) {
837 		if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
838 			SPDK_ERRLOG("Already received IC_RESP PDU, and we should reject this pdu=%p\n", pdu);
839 			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
840 			goto err;
841 		}
842 		expected_hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
843 		if (pdu->hdr.common.plen != expected_hlen) {
844 			plen_error = true;
845 		}
846 	} else {
847 		if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
848 			SPDK_ERRLOG("The TCP/IP tqpair connection is not negotiated\n");
849 			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
850 			goto err;
851 		}
852 
853 		switch (pdu->hdr.common.pdu_type) {
854 		case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
855 			expected_hlen = sizeof(struct spdk_nvme_tcp_rsp);
856 			if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
857 				hd_len = SPDK_NVME_TCP_DIGEST_LEN;
858 			}
859 
860 			if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
861 				plen_error = true;
862 			}
863 			break;
864 		case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
865 			expected_hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
866 			if (pdu->hdr.common.plen < pdu->hdr.common.pdo) {
867 				plen_error = true;
868 			}
869 			break;
870 		case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
871 			expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
872 			if ((pdu->hdr.common.plen <= expected_hlen) ||
873 			    (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
874 				plen_error = true;
875 			}
876 			break;
877 		case SPDK_NVME_TCP_PDU_TYPE_R2T:
878 			expected_hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
879 			if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
880 				hd_len = SPDK_NVME_TCP_DIGEST_LEN;
881 			}
882 
883 			if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
884 				plen_error = true;
885 			}
886 			break;
887 
888 		default:
889 			SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr.common.pdu_type);
890 			fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
891 			error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type);
892 			goto err;
893 		}
894 	}
895 
896 	if (pdu->hdr.common.hlen != expected_hlen) {
897 		SPDK_ERRLOG("Expected PDU header length %u, got %u\n",
898 			    expected_hlen, pdu->hdr.common.hlen);
899 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
900 		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen);
901 		goto err;
902 
903 	} else if (plen_error) {
904 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
905 		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen);
906 		goto err;
907 	} else {
908 		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
909 		return;
910 	}
911 err:
912 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
913 }
914 
915 static struct nvme_tcp_req *
916 get_nvme_active_req_by_cid(struct nvme_tcp_qpair *tqpair, uint32_t cid)
917 {
918 	assert(tqpair != NULL);
919 	if ((cid >= tqpair->num_entries) || (tqpair->tcp_reqs[cid].state == NVME_TCP_REQ_FREE)) {
920 		return NULL;
921 	}
922 
923 	return &tqpair->tcp_reqs[cid];
924 }
925 
926 static void
927 nvme_tcp_free_and_handle_queued_req(struct spdk_nvme_qpair *qpair)
928 {
929 	struct nvme_request *req;
930 
931 	if (!STAILQ_EMPTY(&qpair->queued_req) && !qpair->ctrlr->is_resetting) {
932 		req = STAILQ_FIRST(&qpair->queued_req);
933 		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
934 		nvme_qpair_submit_request(qpair, req);
935 	}
936 }
937 
938 static void
939 nvme_tcp_c2h_data_payload_handle(struct nvme_tcp_qpair *tqpair,
940 				 struct nvme_tcp_pdu *pdu, uint32_t *reaped)
941 {
942 	struct nvme_tcp_req *tcp_req;
943 	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
944 	struct spdk_nvme_cpl cpl = {};
945 	uint8_t flags;
946 
947 	tcp_req = pdu->ctx;
948 	assert(tcp_req != NULL);
949 
950 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
951 	c2h_data = &pdu->hdr.c2h_data;
952 	tcp_req->datao += pdu->data_len;
953 	flags = c2h_data->common.flags;
954 
955 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
956 	if (flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) {
957 		if (tcp_req->datao == tcp_req->req->payload_size) {
958 			cpl.status.p = 0;
959 		} else {
960 			cpl.status.p = 1;
961 		}
962 
963 		cpl.cid = tcp_req->cid;
964 		cpl.sqid = tqpair->qpair.id;
965 		nvme_tcp_req_complete(tcp_req->req, &cpl);
966 		nvme_tcp_req_put(tqpair, tcp_req);
967 		(*reaped)++;
968 		nvme_tcp_free_and_handle_queued_req(&tqpair->qpair);
969 	}
970 }
971 
972 static const char *spdk_nvme_tcp_term_req_fes_str[] = {
973 	"Invalid PDU Header Field",
974 	"PDU Sequence Error",
975 	"Header Digest Error",
976 	"Data Transfer Out of Range",
977 	"Data Transfer Limit Exceeded",
978 	"Unsupported parameter",
979 };
980 
981 static void
982 nvme_tcp_c2h_term_req_dump(struct spdk_nvme_tcp_term_req_hdr *c2h_term_req)
983 {
984 	SPDK_ERRLOG("Error info of pdu(%p): %s\n", c2h_term_req,
985 		    spdk_nvme_tcp_term_req_fes_str[c2h_term_req->fes]);
986 	if ((c2h_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
987 	    (c2h_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
988 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "The offset from the start of the PDU header is %u\n",
989 			      DGET32(c2h_term_req->fei));
990 	}
991 	/* we may also need to dump some other info here */
992 }
993 
994 static void
995 nvme_tcp_c2h_term_req_payload_handle(struct nvme_tcp_qpair *tqpair,
996 				     struct nvme_tcp_pdu *pdu)
997 {
998 	nvme_tcp_c2h_term_req_dump(&pdu->hdr.term_req);
999 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
1000 }
1001 
1002 static void
1003 nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
1004 			    uint32_t *reaped)
1005 {
1006 	int rc = 0;
1007 	struct nvme_tcp_pdu *pdu;
1008 	uint32_t crc32c, error_offset = 0;
1009 	enum spdk_nvme_tcp_term_req_fes fes;
1010 
1011 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1012 	pdu = &tqpair->recv_pdu;
1013 
1014 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
1015 
1016 	/* check the data digest if it is enabled */
1017 	if (pdu->ddgst_enable) {
1018 		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
1019 		rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
1020 		if (rc == 0) {
1021 			SPDK_ERRLOG("data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
1022 			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
1023 			nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1024 			return;
1025 		}
1026 	}
1027 
1028 	switch (pdu->hdr.common.pdu_type) {
1029 	case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
1030 		nvme_tcp_c2h_data_payload_handle(tqpair, pdu, reaped);
1031 		break;
1032 
1033 	case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
1034 		nvme_tcp_c2h_term_req_payload_handle(tqpair, pdu);
1035 		break;
1036 
1037 	default:
1038 		/* The code should never reach this point */
1039 		SPDK_ERRLOG("Unexpected PDU type 0x%02x in payload handler\n", pdu->hdr.common.pdu_type);
1040 		break;
1041 	}
1042 }
1043 
1044 static void
1045 nvme_tcp_send_icreq_complete(void *cb_arg)
1046 {
1047 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Complete the icreq send for tqpair=%p\n",
1048 		      (struct nvme_tcp_qpair *)cb_arg);
1049 }
1050 
1051 static void
1052 nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair,
1053 		       struct nvme_tcp_pdu *pdu)
1054 {
1055 	struct spdk_nvme_tcp_ic_resp *ic_resp = &pdu->hdr.ic_resp;
1056 	uint32_t error_offset = 0;
1057 	enum spdk_nvme_tcp_term_req_fes fes;
1058 
1059 	/* Only PFV 0 is defined currently */
1060 	if (ic_resp->pfv != 0) {
1061 		SPDK_ERRLOG("Expected ICResp PFV %u, got %u\n", 0u, ic_resp->pfv);
1062 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1063 		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, pfv);
1064 		goto end;
1065 	}
1066 
1067 	if (ic_resp->maxh2cdata < NVME_TCP_PDU_H2C_MIN_DATA_SIZE) {
1068 		SPDK_ERRLOG("Expected ICResp maxh2cdata >=%u, got %u\n", NVME_TCP_PDU_H2C_MIN_DATA_SIZE,
1069 			    ic_resp->maxh2cdata);
1070 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1071 		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, maxh2cdata);
1072 		goto end;
1073 	}
1074 	tqpair->maxh2cdata = ic_resp->maxh2cdata;
1075 
1076 	if (ic_resp->cpda > SPDK_NVME_TCP_CPDA_MAX) {
1077 		SPDK_ERRLOG("Expected ICResp cpda <=%u, got %u\n", SPDK_NVME_TCP_CPDA_MAX, ic_resp->cpda);
1078 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1079 		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, cpda);
1080 		goto end;
1081 	}
1082 	tqpair->cpda = ic_resp->cpda;
1083 
1084 	tqpair->host_hdgst_enable = ic_resp->dgst.bits.hdgst_enable ? true : false;
1085 	tqpair->host_ddgst_enable = ic_resp->dgst.bits.ddgst_enable ? true : false;
1086 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "host_hdgst_enable: %u\n", tqpair->host_hdgst_enable);
1087 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "host_ddgst_enable: %u\n", tqpair->host_ddgst_enable);
1088 
1089 	tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
1090 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1091 	return;
1092 end:
1093 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1094 	return;
1095 }
1096 
1097 static void
1098 nvme_tcp_capsule_resp_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
1099 				 uint32_t *reaped)
1100 {
1101 	struct nvme_tcp_req *tcp_req;
1102 	struct spdk_nvme_tcp_rsp *capsule_resp = &pdu->hdr.capsule_resp;
1103 	uint32_t cid, error_offset = 0;
1104 	enum spdk_nvme_tcp_term_req_fes fes;
1105 	struct spdk_nvme_cpl cpl;
1106 
1107 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
1108 	cpl = capsule_resp->rccqe;
1109 	cid = cpl.cid;
1110 
1111 	/* Recv the pdu again */
1112 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1113 
1114 	tcp_req = get_nvme_active_req_by_cid(tqpair, cid);
1115 	if (!tcp_req) {
1116 		SPDK_ERRLOG("no tcp_req is found with cid=%u for tqpair=%p\n", cid, tqpair);
1117 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1118 		error_offset = offsetof(struct spdk_nvme_tcp_rsp, rccqe);
1119 		goto end;
1120 
1121 	}
1122 
1123 	assert(tcp_req->req != NULL);
1124 	assert(tcp_req->state == NVME_TCP_REQ_ACTIVE);
1125 	nvme_tcp_req_complete(tcp_req->req, &cpl);
1126 	nvme_tcp_req_put(tqpair, tcp_req);
1127 	(*reaped)++;
1128 	nvme_tcp_free_and_handle_queued_req(&tqpair->qpair);
1129 
1130 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "complete tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair);
1131 
1132 	return;
1133 
1134 end:
1135 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1136 	return;
1137 }
1138 
1139 static void
1140 nvme_tcp_c2h_term_req_hdr_handle(struct nvme_tcp_qpair *tqpair,
1141 				 struct nvme_tcp_pdu *pdu)
1142 {
1143 	struct spdk_nvme_tcp_term_req_hdr *c2h_term_req = &pdu->hdr.term_req;
1144 	uint32_t error_offset = 0;
1145 	enum spdk_nvme_tcp_term_req_fes fes;
1146 
1147 
1148 	if (c2h_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) {
1149 		SPDK_ERRLOG("Fatal Error Status (FES) is unknown for c2h_term_req pdu=%p\n", pdu);
1150 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1151 		error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes);
1152 		goto end;
1153 	}
1154 
1155 	/* set the data buffer */
1156 	pdu->data = (uint8_t *)pdu->hdr.raw + c2h_term_req->common.hlen;
1157 	pdu->data_len = c2h_term_req->common.plen - c2h_term_req->common.hlen;
1158 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1159 	return;
1160 end:
1161 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1162 	return;
1163 }
1164 
1165 static void
1166 nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
1167 {
1168 	struct nvme_tcp_req *tcp_req;
1169 	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data = &pdu->hdr.c2h_data;
1170 	uint32_t error_offset = 0;
1171 	enum spdk_nvme_tcp_term_req_fes fes;
1172 
1173 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
1174 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "c2h_data info on tqpair(%p): datao=%u, datal=%u, cccid=%d\n",
1175 		      tqpair, c2h_data->datao, c2h_data->datal, c2h_data->cccid);
1176 	tcp_req = get_nvme_active_req_by_cid(tqpair, c2h_data->cccid);
1177 	if (!tcp_req) {
1178 		SPDK_ERRLOG("no tcp_req found for c2hdata cid=%d\n", c2h_data->cccid);
1179 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1180 		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, cccid);
1181 		goto end;
1182 
1183 	}
1184 
1185 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "tcp_req(%p) on tqpair(%p): datao=%u, payload_size=%u\n",
1186 		      tcp_req, tqpair, tcp_req->datao, tcp_req->req->payload_size);
1187 
1188 	if (c2h_data->datal > tcp_req->req->payload_size) {
1189 		SPDK_ERRLOG("Invalid datal for tcp_req(%p), datal(%u) exceeds payload_size(%u)\n",
1190 			    tcp_req, c2h_data->datal, tcp_req->req->payload_size);
1191 		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
1192 		goto end;
1193 	}
1194 
1195 	if (tcp_req->datao != c2h_data->datao) {
1196 		SPDK_ERRLOG("Invalid datao for tcp_req(%p), received datao(%u) != expected datao(%u)\n",
1197 			    tcp_req, c2h_data->datao, tcp_req->datao);
1198 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1199 		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datao);
1200 		goto end;
1201 	}
1202 
1203 	if ((c2h_data->datao + c2h_data->datal) > tcp_req->req->payload_size) {
1204 		SPDK_ERRLOG("Invalid data range for tcp_req(%p), received (datao(%u) + datal(%u)) exceeds payload_size(%u)\n",
1205 			    tcp_req, c2h_data->datao, c2h_data->datal, tcp_req->req->payload_size);
1206 		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
1207 		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datal);
1208 		goto end;
1209 
1210 	}
1211 
1212 	nvme_tcp_pdu_set_data_buf(pdu, tcp_req);
1213 	pdu->data_len = c2h_data->datal;
1214 	pdu->ctx = tcp_req;
1215 
1216 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1217 	return;
1218 
1219 end:
1220 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1221 	return;
1222 }
1223 
1224 static void
1225 nvme_tcp_qpair_h2c_data_send_complete(void *cb_arg)
1226 {
1227 	struct nvme_tcp_req *tcp_req = cb_arg;
1228 
1229 	assert(tcp_req != NULL);
1230 
1231 	if (tcp_req->r2tl_remain) {
1232 		spdk_nvme_tcp_send_h2c_data(tcp_req);
1233 	}
1234 }
1235 
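/*
 * Send the next H2C Data PDU for an outstanding R2T.  The transfer is chunked
 * so that no single PDU carries more than the controller's MAXH2CDATA; the
 * send-complete callback re-invokes this function until r2tl_remain reaches
 * zero, at which point the LAST_PDU flag is set and the pending R2T is retired.
 */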
1236 static void
1237 spdk_nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req)
1238 {
1239 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(tcp_req->req->qpair);
1240 	struct nvme_tcp_pdu *rsp_pdu;
1241 	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
1242 	uint32_t plen, pdo, alignment;
1243 
1244 	rsp_pdu = &tcp_req->send_pdu;
1245 	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
1246 	nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req);
1247 	h2c_data = &rsp_pdu->hdr.h2c_data;
1248 
1249 	h2c_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
1250 	plen = h2c_data->common.hlen = sizeof(*h2c_data);
1251 	h2c_data->cccid = tcp_req->cid;
1252 	h2c_data->ttag = tcp_req->ttag;
1253 	h2c_data->datao = tcp_req->datao;
1254 
1255 	h2c_data->datal = spdk_min(tcp_req->r2tl_remain, tqpair->maxh2cdata);
1256 	rsp_pdu->data_len = h2c_data->datal;
1257 	tcp_req->r2tl_remain -= h2c_data->datal;
1258 
1259 	if (tqpair->host_hdgst_enable) {
1260 		h2c_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1261 		plen += SPDK_NVME_TCP_DIGEST_LEN;
1262 	}
1263 
1264 	rsp_pdu->padding_len = 0;
1265 	pdo = plen;
1266 	if (tqpair->cpda) {
1267 		alignment = (tqpair->cpda + 1) << 2;
1268 		if (alignment > plen) {
1269 			rsp_pdu->padding_len = alignment - plen;
1270 			pdo = plen = alignment;
1271 		}
1272 	}
1273 
1274 	h2c_data->common.pdo = pdo;
1275 	plen += h2c_data->datal;
1276 	if (tqpair->host_ddgst_enable) {
1277 		h2c_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
1278 		plen += SPDK_NVME_TCP_DIGEST_LEN;
1279 	}
1280 
1281 	h2c_data->common.plen = plen;
1282 	tcp_req->datao += h2c_data->datal;
1283 	if (!tcp_req->r2tl_remain) {
1284 		tqpair->pending_r2t--;
1285 		assert(tqpair->pending_r2t >= 0);
1286 		tcp_req->state = NVME_TCP_REQ_ACTIVE;
1287 		h2c_data->common.flags |= SPDK_NVME_TCP_H2C_DATA_FLAGS_LAST_PDU;
1288 	}
1289 
1290 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "h2c_data info: datao=%u, datal=%u, pdu_len=%u for tqpair=%p\n",
1291 		      h2c_data->datao, h2c_data->datal, h2c_data->common.plen, tqpair);
1292 
1293 	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_h2c_data_send_complete, tcp_req);
1294 }
1295 
1296 static void
1297 nvme_tcp_r2t_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
1298 {
1299 	struct nvme_tcp_req *tcp_req;
1300 	struct spdk_nvme_tcp_r2t_hdr *r2t = &pdu->hdr.r2t;
1301 	uint32_t cid, error_offset = 0;
1302 	enum spdk_nvme_tcp_term_req_fes fes;
1303 
1304 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
1305 	cid = r2t->cccid;
1306 	tcp_req = get_nvme_active_req_by_cid(tqpair, cid);
1307 	if (!tcp_req) {
1308 		SPDK_ERRLOG("Cannot find tcp_req for tqpair=%p\n", tqpair);
1309 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1310 		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, cccid);
1311 		goto end;
1312 	}
1313 
1314 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "r2t info: r2to=%u, r2tl=%u for tqpair=%p\n", r2t->r2to, r2t->r2tl,
1315 		      tqpair);
1316 
1317 	if (tcp_req->state != NVME_TCP_REQ_ACTIVE_R2T) {
1318 		if (tqpair->pending_r2t >= tqpair->max_r2t) {
1319 			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
1320 			SPDK_ERRLOG("Invalid R2T: pending R2T count exceeds the maximum of %u for tqpair=%p\n", tqpair->max_r2t, tqpair);
1321 			goto end;
1322 		}
1323 		tcp_req->state = NVME_TCP_REQ_ACTIVE_R2T;
1324 		tqpair->pending_r2t++;
1325 	}
1326 
1327 	if (tcp_req->datao != r2t->r2to) {
1328 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1329 		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, r2to);
1330 		goto end;
1331 
1332 	}
1333 
1334 	if ((r2t->r2tl + r2t->r2to) > tcp_req->req->payload_size) {
1335 		SPDK_ERRLOG("Invalid R2T info for tcp_req=%p: (r2to(%u) + r2tl(%u)) exceeds payload_size(%u)\n",
1336 			    tcp_req, r2t->r2to, r2t->r2tl, tcp_req->req->payload_size);
1337 		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
1338 		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, r2tl);
1339 		goto end;
1340 
1341 	}
1342 
1343 	tcp_req->ttag = r2t->ttag;
1344 	tcp_req->r2tl_remain = r2t->r2tl;
1345 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1346 
1347 	spdk_nvme_tcp_send_h2c_data(tcp_req);
1348 	return;
1349 
1350 end:
1351 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1352 	return;
1353 
1354 }
1355 
1356 static void
1357 nvme_tcp_pdu_psh_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
1358 {
1359 	struct nvme_tcp_pdu *pdu;
1360 	int rc;
1361 	uint32_t crc32c, error_offset = 0;
1362 	enum spdk_nvme_tcp_term_req_fes fes;
1363 
1364 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
1365 	pdu = &tqpair->recv_pdu;
1366 
1367 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter: pdu type =%u\n", pdu->hdr.common.pdu_type);
1368 	/* check header digest if needed */
1369 	if (pdu->has_hdgst) {
1370 		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
1371 		rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
1372 		if (rc == 0) {
1373 			SPDK_ERRLOG("header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
1374 			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
1375 			nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1376 			return;
1377 
1378 		}
1379 	}
1380 
1381 	switch (pdu->hdr.common.pdu_type) {
1382 	case SPDK_NVME_TCP_PDU_TYPE_IC_RESP:
1383 		nvme_tcp_icresp_handle(tqpair, pdu);
1384 		break;
1385 	case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
1386 		nvme_tcp_capsule_resp_hdr_handle(tqpair, pdu, reaped);
1387 		break;
1388 	case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
1389 		nvme_tcp_c2h_data_hdr_handle(tqpair, pdu);
1390 		break;
1391 
1392 	case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
1393 		nvme_tcp_c2h_term_req_hdr_handle(tqpair, pdu);
1394 		break;
1395 	case SPDK_NVME_TCP_PDU_TYPE_R2T:
1396 		nvme_tcp_r2t_hdr_handle(tqpair, pdu);
1397 		break;
1398 
1399 	default:
1400 		SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr.common.pdu_type);
1401 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1402 		error_offset = 1;
1403 		nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1404 		break;
1405 	}
1406 
1407 }
1408 
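/*
 * Receive-side state machine for a single PDU.  A PDU is consumed in stages:
 *   AWAIT_PDU_READY -> AWAIT_PDU_CH (common header) -> AWAIT_PDU_PSH
 *   (PDU-specific header plus header digest/padding when present) ->
 *   AWAIT_PDU_PAYLOAD (data and optional data digest) -> back to READY.
 * NVME_TCP_PDU_IN_PROGRESS is returned when more socket data is needed and
 * NVME_TCP_PDU_FATAL when the connection is in an unrecoverable state.
 */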
1409 static int
1410 nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
1411 {
1412 	int rc = 0;
1413 	struct nvme_tcp_pdu *pdu;
1414 	uint32_t data_len;
1415 	uint8_t psh_len, pdo;
1416 	int8_t padding_len;
1417 	enum nvme_tcp_pdu_recv_state prev_state;
1418 
1419 	/* The loop here is to allow for several back-to-back state changes. */
1420 	do {
1421 		prev_state = tqpair->recv_state;
1422 		switch (tqpair->recv_state) {
1423 		/* If in a new state */
1424 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
1425 			nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
1426 			break;
1427 		/* common header */
1428 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
1429 			pdu = &tqpair->recv_pdu;
1430 			if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
1431 				rc = nvme_tcp_read_data(tqpair->sock,
1432 							sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
1433 							(uint8_t *)&pdu->hdr.common + pdu->ch_valid_bytes);
1434 				if (rc < 0) {
1435 					nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
1436 					break;
1437 				}
1438 				pdu->ch_valid_bytes += rc;
1439 				if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
1440 					return NVME_TCP_PDU_IN_PROGRESS;
1441 				}
1442 			}
1443 
1444 			/* The common header of this PDU has now been read from the socket. */
1445 			nvme_tcp_pdu_ch_handle(tqpair);
1446 			break;
1447 		/* Wait for the PDU-specific header */
1448 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
1449 			pdu = &tqpair->recv_pdu;
1450 			psh_len = pdu->hdr.common.hlen;
1451 
1452 			/* The following PDU types may carry a header digest */
1453 			if (((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP) ||
1454 			     (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) ||
1455 			     (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_R2T)) &&
1456 			    tqpair->host_hdgst_enable) {
1457 				pdu->has_hdgst = true;
1458 				psh_len += SPDK_NVME_TCP_DIGEST_LEN;
1459 				if (pdu->hdr.common.plen > psh_len) {
1460 					pdo = pdu->hdr.common.pdo;
1461 					padding_len = pdo - psh_len;
1462 					SPDK_DEBUGLOG(SPDK_LOG_NVME, "padding length is =%d for pdu=%p on tqpair=%p\n", padding_len,
1463 						      pdu, tqpair);
1464 					if (padding_len > 0) {
1465 						psh_len = pdo;
1466 					}
1467 				}
1468 			}
1469 
1470 			psh_len -= sizeof(struct spdk_nvme_tcp_common_pdu_hdr);
1471 			/* The following reads the PSH + header digest (if present) + padding (if present) */
1472 			if (pdu->psh_valid_bytes < psh_len) {
1473 				rc = nvme_tcp_read_data(tqpair->sock,
1474 							psh_len - pdu->psh_valid_bytes,
1475 							(uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
1476 				if (rc < 0) {
1477 					nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
1478 					break;
1479 				}
1480 
1481 				pdu->psh_valid_bytes += rc;
1482 				if (pdu->psh_valid_bytes < psh_len) {
1483 					return NVME_TCP_PDU_IN_PROGRESS;
1484 				}
1485 			}
1486 
1487 			/* The entire header (CH, PSH, and header digest) of this PDU has now been read from the socket. */
1488 			nvme_tcp_pdu_psh_handle(tqpair, reaped);
1489 			break;
1490 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
1491 			pdu = &tqpair->recv_pdu;
1492 			/* If no data buffer has been set for this PDU yet, just return */
1493 			if (!pdu->data) {
1494 				return NVME_TCP_PDU_IN_PROGRESS;
1495 			}
1496 
1497 			data_len = pdu->data_len;
1498 			/* data digest */
1499 			if (spdk_unlikely((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) &&
1500 					  tqpair->host_ddgst_enable)) {
1501 				data_len += SPDK_NVME_TCP_DIGEST_LEN;
1502 				pdu->ddgst_enable = true;
1503 
1504 			}
1505 
1506 			rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
1507 			if (rc < 0) {
1508 				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
1509 				break;
1510 			}
1511 
1512 			pdu->readv_offset += rc;
1513 			if (pdu->readv_offset < data_len) {
1514 				return NVME_TCP_PDU_IN_PROGRESS;
1515 			}
1516 
1517 			assert(pdu->readv_offset == data_len);
1518 			/* All of this PDU has now been read from the socket. */
1519 			nvme_tcp_pdu_payload_handle(tqpair, reaped);
1520 			break;
1521 		case NVME_TCP_PDU_RECV_STATE_ERROR:
1522 			rc = NVME_TCP_PDU_FATAL;
1523 			break;
1524 		default:
1525 			assert(0);
1526 			break;
1527 		}
1528 	} while (prev_state != tqpair->recv_state);
1529 
1530 	return rc;
1531 }
1532 
1533 static void
1534 nvme_tcp_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
1535 {
1536 	uint64_t t02;
1537 	struct nvme_tcp_req *tcp_req, *tmp;
1538 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
1539 	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
1540 	struct spdk_nvme_ctrlr_process *active_proc;
1541 
1542 	/* Don't check timeouts during controller initialization. */
1543 	if (ctrlr->state != NVME_CTRLR_STATE_READY) {
1544 		return;
1545 	}
1546 
1547 	if (nvme_qpair_is_admin_queue(qpair)) {
1548 		active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
1549 	} else {
1550 		active_proc = qpair->active_proc;
1551 	}
1552 
1553 	/* Only check timeouts if the current process has a timeout callback. */
1554 	if (active_proc == NULL || active_proc->timeout_cb_fn == NULL) {
1555 		return;
1556 	}
1557 
1558 	t02 = spdk_get_ticks();
1559 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
1560 		assert(tcp_req->req != NULL);
1561 
1562 		if (nvme_request_check_timeout(tcp_req->req, tcp_req->cid, active_proc, t02)) {
1563 			/*
1564 			 * The requests are in order, so as soon as one has not timed out,
1565 			 * stop iterating.
1566 			 */
1567 			break;
1568 		}
1569 	}
1570 }
1571 
1572 int
1573 nvme_tcp_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
1574 {
1575 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
1576 	uint32_t reaped;
1577 	int rc;
1578 
1579 	rc = nvme_tcp_qpair_process_send_queue(tqpair);
1580 	if (rc) {
1581 		return 0;
1582 	}
1583 
1584 	if (max_completions == 0) {
1585 		max_completions = tqpair->num_entries;
1586 	} else {
1587 		max_completions = spdk_min(max_completions, tqpair->num_entries);
1588 	}
1589 
1590 	reaped = 0;
1591 	do {
1592 		rc = nvme_tcp_read_pdu(tqpair, &reaped);
1593 		if (rc < 0) {
1594 			SPDK_ERRLOG("Error polling CQ! (%d): %s\n",
1595 				    errno, spdk_strerror(errno));
1596 			return -1;
1597 		} else if (rc == 0) {
1598 			/* Only part of a PDU was read; wait for more data */
1599 			break;
1600 		}
1601 
1602 	} while (reaped < max_completions);
1603 
1604 	if (spdk_unlikely(tqpair->qpair.ctrlr->timeout_enabled)) {
1605 		nvme_tcp_qpair_check_timeout(qpair);
1606 	}
1607 
1608 	return reaped;
1609 }
1610 
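/*
 * Perform the ICReq/ICResp handshake on a freshly connected socket: send an
 * ICReq advertising this host's HPDA, MAXR2T, and digest preferences, then
 * poll the qpair until the ICResp moves it out of the INVALID state.  Only a
 * qpair that reaches NVME_TCP_QPAIR_STATE_RUNNING is considered connected.
 */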
1611 static int
1612 nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
1613 {
1614 	struct spdk_nvme_tcp_ic_req *ic_req;
1615 	struct nvme_tcp_pdu *pdu;
1616 
1617 	pdu = &tqpair->send_pdu;
1618 	memset(&tqpair->send_pdu, 0, sizeof(tqpair->send_pdu));
1619 	ic_req = &pdu->hdr.ic_req;
1620 
1621 	ic_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
1622 	ic_req->common.hlen = ic_req->common.plen = sizeof(*ic_req);
1623 	ic_req->pfv = 0;
1624 	ic_req->maxr2t = NVME_TCP_MAX_R2T_DEFAULT - 1;
1625 	ic_req->hpda = NVME_TCP_HPDA_DEFAULT;
1626 
1627 	ic_req->dgst.bits.hdgst_enable = tqpair->qpair.ctrlr->opts.header_digest;
1628 	ic_req->dgst.bits.ddgst_enable = tqpair->qpair.ctrlr->opts.data_digest;
1629 
1630 	nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_send_icreq_complete, tqpair);
1631 
1632 	while (tqpair->state == NVME_TCP_QPAIR_STATE_INVALID) {
1633 		nvme_tcp_qpair_process_completions(&tqpair->qpair, 0);
1634 	}
1635 
1636 	if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
1637 		SPDK_ERRLOG("Failed to construct tqpair=%p: no valid ICResp was received\n", tqpair);
1638 		return -1;
1639 	}
1640 
1641 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Successfully constructed tqpair=%p after receiving a valid ICResp\n", tqpair);
1642 
1643 	return 0;
1644 }
1645 
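/*
 * Connect a qpair: resolve the transport address, open the TCP socket via
 * spdk_sock_connect(), allocate the per-queue request pool, run the
 * ICReq/ICResp handshake, and finally issue the NVMe-oF Fabrics CONNECT
 * command through nvme_fabric_qpair_connect().
 */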
1646 static int
1647 nvme_tcp_qpair_connect(struct nvme_tcp_qpair *tqpair)
1648 {
1649 	struct sockaddr_storage dst_addr;
1650 	struct sockaddr_storage src_addr;
1651 	int rc;
1652 	struct spdk_nvme_ctrlr *ctrlr;
1653 	int family;
1654 	long int port;
1655 
1656 	ctrlr = tqpair->qpair.ctrlr;
1657 
1658 	switch (ctrlr->trid.adrfam) {
1659 	case SPDK_NVMF_ADRFAM_IPV4:
1660 		family = AF_INET;
1661 		break;
1662 	case SPDK_NVMF_ADRFAM_IPV6:
1663 		family = AF_INET6;
1664 		break;
1665 	default:
1666 		SPDK_ERRLOG("Unhandled ADRFAM %d\n", ctrlr->trid.adrfam);
1667 		return -1;
1668 	}
1669 
1670 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "adrfam %d ai_family %d\n", ctrlr->trid.adrfam, family);
1671 
1672 	memset(&dst_addr, 0, sizeof(dst_addr));
1673 
1674 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "trsvcid is %s\n", ctrlr->trid.trsvcid);
1675 	rc = nvme_tcp_parse_addr(&dst_addr, family, ctrlr->trid.traddr, ctrlr->trid.trsvcid);
1676 	if (rc != 0) {
1677 		SPDK_ERRLOG("dst_addr nvme_tcp_parse_addr() failed\n");
1678 		return -1;
1679 	}
1680 
1681 	if (ctrlr->opts.src_addr[0] || ctrlr->opts.src_svcid[0]) {
1682 		memset(&src_addr, 0, sizeof(src_addr));
1683 		rc = nvme_tcp_parse_addr(&src_addr, family, ctrlr->opts.src_addr, ctrlr->opts.src_svcid);
1684 		if (rc != 0) {
1685 			SPDK_ERRLOG("src_addr nvme_tcp_parse_addr() failed\n");
1686 			return -1;
1687 		}
1688 	}
1689 
1690 	port = spdk_strtol(ctrlr->trid.trsvcid, 10);
1691 	if (port <= 0 || port >= INT_MAX) {
1692 		SPDK_ERRLOG("Invalid port: %s\n", ctrlr->trid.trsvcid);
1693 		return -1;
1694 	}
1695 
1696 	tqpair->sock = spdk_sock_connect(ctrlr->trid.traddr, port);
1697 	if (!tqpair->sock) {
1698 		SPDK_ERRLOG("sock connection error of tqpair=%p with addr=%s, port=%ld\n",
1699 			    tqpair, ctrlr->trid.traddr, port);
1700 		return -1;
1701 	}
1702 
1703 	tqpair->max_r2t = NVME_TCP_MAX_R2T_DEFAULT;
1704 	rc = nvme_tcp_alloc_reqs(tqpair);
1705 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "rc =%d\n", rc);
1706 	if (rc) {
1707 		SPDK_ERRLOG("Unable to allocate tqpair tcp requests\n");
1708 		return -1;
1709 	}
1710 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "TCP requests allocated\n");
1711 
1712 	rc = nvme_tcp_qpair_icreq_send(tqpair);
1713 	if (rc != 0) {
1714 		SPDK_ERRLOG("Unable to connect the tqpair\n");
1715 		return -1;
1716 	}
1717 
1718 	rc = nvme_fabric_qpair_connect(&tqpair->qpair, tqpair->num_entries);
1719 	if (rc < 0) {
1720 		SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
1721 		return -1;
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 static struct spdk_nvme_qpair *
1728 nvme_tcp_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr,
1729 			    uint16_t qid, uint32_t qsize,
1730 			    enum spdk_nvme_qprio qprio,
1731 			    uint32_t num_requests)
1732 {
1733 	struct nvme_tcp_qpair *tqpair;
1734 	struct spdk_nvme_qpair *qpair;
1735 	int rc;
1736 
1737 	tqpair = calloc(1, sizeof(struct nvme_tcp_qpair));
1738 	if (!tqpair) {
1739 		SPDK_ERRLOG("failed to allocate tqpair\n");
1740 		return NULL;
1741 	}
1742 
1743 	tqpair->num_entries = qsize;
1744 	qpair = &tqpair->qpair;
1745 
1746 	rc = nvme_qpair_init(qpair, qid, ctrlr, qprio, num_requests);
1747 	if (rc != 0) {
1748 		free(tqpair);
1749 		return NULL;
1750 	}
1751 
1752 	rc = nvme_tcp_qpair_connect(tqpair);
1753 	if (rc < 0) {
1754 		nvme_tcp_qpair_destroy(qpair);
1755 		return NULL;
1756 	}
1757 
1758 	return qpair;
1759 }
1760 
1761 struct spdk_nvme_qpair *
1762 nvme_tcp_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
1763 			       const struct spdk_nvme_io_qpair_opts *opts)
1764 {
1765 	return nvme_tcp_ctrlr_create_qpair(ctrlr, qid, opts->io_queue_size, opts->qprio,
1766 					   opts->io_queue_requests);
1767 }
1768 
1769 struct spdk_nvme_ctrlr *nvme_tcp_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
1770 		const struct spdk_nvme_ctrlr_opts *opts,
1771 		void *devhandle)
1772 {
1773 	struct nvme_tcp_ctrlr *tctrlr;
1774 	union spdk_nvme_cap_register cap;
1775 	union spdk_nvme_vs_register vs;
1776 	int rc;
1777 
1778 	tctrlr = calloc(1, sizeof(*tctrlr));
1779 	if (tctrlr == NULL) {
1780 		SPDK_ERRLOG("could not allocate ctrlr\n");
1781 		return NULL;
1782 	}
1783 
1784 	tctrlr->ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
1785 	tctrlr->ctrlr.opts = *opts;
1786 	tctrlr->ctrlr.trid = *trid;
1787 
1788 	rc = nvme_ctrlr_construct(&tctrlr->ctrlr);
1789 	if (rc != 0) {
1790 		free(tctrlr);
1791 		return NULL;
1792 	}
1793 
1794 	tctrlr->ctrlr.adminq = nvme_tcp_ctrlr_create_qpair(&tctrlr->ctrlr, 0,
1795 			       SPDK_NVMF_MIN_ADMIN_QUEUE_ENTRIES, 0, SPDK_NVMF_MIN_ADMIN_QUEUE_ENTRIES);
1796 	if (!tctrlr->ctrlr.adminq) {
1797 		SPDK_ERRLOG("failed to create admin qpair\n");
1798 		nvme_tcp_ctrlr_destruct(&tctrlr->ctrlr);
1799 		return NULL;
1800 	}
1801 
1802 	if (nvme_ctrlr_get_cap(&tctrlr->ctrlr, &cap)) {
1803 		SPDK_ERRLOG("get_cap() failed\n");
1804 		nvme_ctrlr_destruct(&tctrlr->ctrlr);
1805 		return NULL;
1806 	}
1807 
1808 	if (nvme_ctrlr_get_vs(&tctrlr->ctrlr, &vs)) {
1809 		SPDK_ERRLOG("get_vs() failed\n");
1810 		nvme_ctrlr_destruct(&tctrlr->ctrlr);
1811 		return NULL;
1812 	}
1813 
1814 	if (nvme_ctrlr_add_process(&tctrlr->ctrlr, 0) != 0) {
1815 		SPDK_ERRLOG("nvme_ctrlr_add_process() failed\n");
1816 		nvme_ctrlr_destruct(&tctrlr->ctrlr);
1817 		return NULL;
1818 	}
1819 
1820 	nvme_ctrlr_init_cap(&tctrlr->ctrlr, &cap, &vs);
1821 
1822 	return &tctrlr->ctrlr;
1823 }
1824 
1825 uint32_t
1826 nvme_tcp_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
1827 {
1828 	return NVME_TCP_RW_BUFFER_SIZE;
1829 }
1830 
1831 uint16_t
1832 nvme_tcp_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
1833 {
1834 	/*
1835 	 * We do not support >1 SGE in the initiator currently,
1836 	 *  so we can only return 1 here.  Once that support is
1837 	 *  added, this should return ctrlr->cdata.nvmf_specific.msdbd
1838 	 *  instead.
1839 	 */
1840 	return 1;
1841 }
1842 
1843 void *
1844 nvme_tcp_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
1845 {
1846 	return NULL;
1847 }
1848 
1849 int
1850 nvme_tcp_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
1851 {
1852 	return 0;
1853 }
1854