/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation. All rights reserved.
 *   Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe/TCP transport
 */

#include "nvme_internal.h"

#include "spdk/endian.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "spdk/stdinc.h"
#include "spdk/crc32.h"
#include "spdk/assert.h"
#include "spdk/trace.h"
#include "spdk/util.h"
#include "spdk/nvmf.h"

#include "spdk_internal/nvme_tcp.h"
#include "spdk_internal/trace_defs.h"

#define NVME_TCP_RW_BUFFER_SIZE 131072

/* For async connect workloads, allow more time since we are more likely
 * to be processing lots of ICREQs at once.
 */
#define ICREQ_TIMEOUT_SYNC 2 /* in seconds */
#define ICREQ_TIMEOUT_ASYNC 10 /* in seconds */
#define NVME_TCP_HPDA_DEFAULT			0
#define NVME_TCP_MAX_R2T_DEFAULT		1
#define NVME_TCP_PDU_H2C_MIN_DATA_SIZE		4096

/*
 * Maximum value of transport_ack_timeout used by TCP controller
 */
#define NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT	31


/* NVMe TCP transport extensions for spdk_nvme_ctrlr */
struct nvme_tcp_ctrlr {
	struct spdk_nvme_ctrlr			ctrlr;
	char					psk_identity[NVMF_PSK_IDENTITY_LEN];
	uint8_t					psk[SPDK_TLS_PSK_MAX_LEN];
	int					psk_size;
	char					*tls_cipher_suite;
};

struct nvme_tcp_poll_group {
	struct spdk_nvme_transport_poll_group group;
	struct spdk_sock_group *sock_group;
	uint32_t completions_per_qpair;
	int64_t num_completions;

	TAILQ_HEAD(, nvme_tcp_qpair) needs_poll;
	struct spdk_nvme_tcp_stat stats;
};

/* NVMe TCP qpair extensions for spdk_nvme_qpair */
struct nvme_tcp_qpair {
	struct spdk_nvme_qpair			qpair;
	struct spdk_sock			*sock;

	TAILQ_HEAD(, nvme_tcp_req)		free_reqs;
	TAILQ_HEAD(, nvme_tcp_req)		outstanding_reqs;

	TAILQ_HEAD(, nvme_tcp_pdu)		send_queue;
	struct nvme_tcp_pdu			*recv_pdu;
	struct nvme_tcp_pdu			*send_pdu; /* only for error pdu and init pdu */
	struct nvme_tcp_pdu			*send_pdus; /* Used by tcp_reqs */
	enum nvme_tcp_pdu_recv_state		recv_state;
	struct nvme_tcp_req			*tcp_reqs;
	struct spdk_nvme_tcp_stat		*stats;

	uint16_t				num_entries;
	uint16_t				async_complete;

	struct {
		uint16_t host_hdgst_enable: 1;
		uint16_t host_ddgst_enable: 1;
		uint16_t icreq_send_ack: 1;
		uint16_t in_connect_poll: 1;
		uint16_t reserved: 12;
	} flags;

	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
	uint32_t				maxh2cdata;

	uint32_t				maxr2t;

	/* 0-based value used to guide the padding */
	uint8_t					cpda;

	enum nvme_tcp_qpair_state		state;

	TAILQ_ENTRY(nvme_tcp_qpair)		link;
	bool					needs_poll;

	uint64_t				icreq_timeout_tsc;

	bool					shared_stats;
};

enum nvme_tcp_req_state {
	NVME_TCP_REQ_FREE,
	NVME_TCP_REQ_ACTIVE,
	NVME_TCP_REQ_ACTIVE_R2T,
};

struct nvme_tcp_req {
	struct nvme_request			*req;
	enum nvme_tcp_req_state			state;
	uint16_t				cid;
	uint16_t				ttag;
	uint32_t				datao;
	uint32_t				expected_datao;
	uint32_t				r2tl_remain;
	uint32_t				active_r2ts;
	/* Used to hold the ttag received in a subsequent R2T while we are still
	 * waiting for the current H2C transfer to complete */
	uint16_t				ttag_r2t_next;
	bool					in_capsule_data;
	/* Used to track whether the req can be safely freed */
	union {
		uint8_t raw;
		struct {
			/* The last send operation completed - kernel released the send buffer */
			uint8_t				send_ack : 1;
			/* Data transfer completed - target sent the response or the last data bit */
			uint8_t				data_recv : 1;
			/* tcp_req is waiting for completion of the previous send operation (buffer reclaim notification
			 * from kernel) to send H2C */
			uint8_t				h2c_send_waiting_ack : 1;
			/* tcp_req received a subsequent R2T while it is still waiting for send_ack.
			 * This is a rare case that occurs with targets that may send several R2T requests;
			 * the SPDK TCP target sends 1 R2T for the whole data buffer */
			uint8_t				r2t_waiting_h2c_complete : 1;
			uint8_t				reserved : 4;
		} bits;
	} ordering;
	struct nvme_tcp_pdu			*pdu;
	struct iovec				iov[NVME_TCP_MAX_SGL_DESCRIPTORS];
	uint32_t				iovcnt;
	/* Used to hold the R2T length received in a subsequent R2T while we are still
	 * waiting for the H2C ack */
	uint32_t				r2tl_remain_next;
	struct nvme_tcp_qpair			*tqpair;
	TAILQ_ENTRY(nvme_tcp_req)		link;
	struct spdk_nvme_cpl			rsp;
};

static struct spdk_nvme_tcp_stat g_dummy_stats = {};

static void nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req);
static int64_t nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group
		*tgroup, uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
static void nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu);
static void nvme_tcp_req_complete(struct nvme_tcp_req *tcp_req, struct nvme_tcp_qpair *tqpair,
				  struct spdk_nvme_cpl *rsp, bool print_on_error);

static inline struct nvme_tcp_qpair *
nvme_tcp_qpair(struct spdk_nvme_qpair *qpair)
{
	assert(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
	return SPDK_CONTAINEROF(qpair, struct nvme_tcp_qpair, qpair);
}

static inline struct nvme_tcp_poll_group *
nvme_tcp_poll_group(struct spdk_nvme_transport_poll_group *group)
{
	return SPDK_CONTAINEROF(group, struct nvme_tcp_poll_group, group);
}

static inline struct nvme_tcp_ctrlr *
nvme_tcp_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	assert(ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_TCP);
	return SPDK_CONTAINEROF(ctrlr, struct nvme_tcp_ctrlr, ctrlr);
}

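/* Pop a request from the qpair's free list and reset its per-command state.
 * Returns NULL when all num_entries requests are already outstanding, which
 * the submission path reports to the caller as -EAGAIN. */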
static struct nvme_tcp_req *
nvme_tcp_req_get(struct nvme_tcp_qpair *tqpair)
{
	struct nvme_tcp_req *tcp_req;

	tcp_req = TAILQ_FIRST(&tqpair->free_reqs);
	if (!tcp_req) {
		return NULL;
	}

	assert(tcp_req->state == NVME_TCP_REQ_FREE);
	tcp_req->state = NVME_TCP_REQ_ACTIVE;
	TAILQ_REMOVE(&tqpair->free_reqs, tcp_req, link);
	tcp_req->datao = 0;
	tcp_req->expected_datao = 0;
	tcp_req->req = NULL;
	tcp_req->in_capsule_data = false;
	tcp_req->r2tl_remain = 0;
	tcp_req->r2tl_remain_next = 0;
	tcp_req->active_r2ts = 0;
	tcp_req->iovcnt = 0;
	tcp_req->ordering.raw = 0;
	memset(tcp_req->pdu, 0, sizeof(struct nvme_tcp_pdu));
	memset(&tcp_req->rsp, 0, sizeof(struct spdk_nvme_cpl));

	return tcp_req;
}

static void
nvme_tcp_req_put(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
{
	assert(tcp_req->state != NVME_TCP_REQ_FREE);
	tcp_req->state = NVME_TCP_REQ_FREE;
	TAILQ_INSERT_HEAD(&tqpair->free_reqs, tcp_req, link);
}

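/* Resolve addr/service into a sockaddr via getaddrinfo(), restricted to
 * stream sockets of the given address family. Only the first result is
 * used; it is copied into the caller-provided sockaddr_storage. */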
static int
nvme_tcp_parse_addr(struct sockaddr_storage *sa, int family, const char *addr, const char *service)
{
	struct addrinfo *res;
	struct addrinfo hints;
	int ret;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = family;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_protocol = 0;

	ret = getaddrinfo(addr, service, &hints, &res);
	if (ret) {
		SPDK_ERRLOG("getaddrinfo failed: %s (%d)\n", gai_strerror(ret), ret);
		return -(abs(ret));
	}

	if (res->ai_addrlen > sizeof(*sa)) {
		SPDK_ERRLOG("getaddrinfo() ai_addrlen %zu too large\n", (size_t)res->ai_addrlen);
		ret = -EINVAL;
	} else {
		memcpy(sa, res->ai_addr, res->ai_addrlen);
	}

	freeaddrinfo(res);
	return ret;
}

static void
nvme_tcp_free_reqs(struct nvme_tcp_qpair *tqpair)
{
	free(tqpair->tcp_reqs);
	tqpair->tcp_reqs = NULL;

	spdk_free(tqpair->send_pdus);
	tqpair->send_pdus = NULL;
}

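/* Allocate the request array plus one DMA-able PDU per request. Two extra
 * PDUs are appended to the same allocation and used as the qpair's private
 * send_pdu (for icreq and term req PDUs) and recv_pdu. */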
static int
nvme_tcp_alloc_reqs(struct nvme_tcp_qpair *tqpair)
{
	uint16_t i;
	struct nvme_tcp_req	*tcp_req;

	tqpair->tcp_reqs = calloc(tqpair->num_entries, sizeof(struct nvme_tcp_req));
	if (tqpair->tcp_reqs == NULL) {
		SPDK_ERRLOG("Failed to allocate tcp_reqs on tqpair=%p\n", tqpair);
		goto fail;
	}

	/* Allocate 2 additional members for the send_pdu and recv_pdu owned by the tqpair */
	tqpair->send_pdus = spdk_zmalloc((tqpair->num_entries + 2) * sizeof(struct nvme_tcp_pdu),
					 0x1000, NULL,
					 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);

	if (tqpair->send_pdus == NULL) {
		SPDK_ERRLOG("Failed to allocate send_pdus on tqpair=%p\n", tqpair);
		goto fail;
	}

	TAILQ_INIT(&tqpair->send_queue);
	TAILQ_INIT(&tqpair->free_reqs);
	TAILQ_INIT(&tqpair->outstanding_reqs);
	for (i = 0; i < tqpair->num_entries; i++) {
		tcp_req = &tqpair->tcp_reqs[i];
		tcp_req->cid = i;
		tcp_req->tqpair = tqpair;
		tcp_req->pdu = &tqpair->send_pdus[i];
		TAILQ_INSERT_TAIL(&tqpair->free_reqs, tcp_req, link);
	}

	tqpair->send_pdu = &tqpair->send_pdus[i];
	tqpair->recv_pdu = &tqpair->send_pdus[i + 1];

	return 0;
fail:
	nvme_tcp_free_reqs(tqpair);
	return -ENOMEM;
}

static void nvme_tcp_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);

static void
nvme_tcp_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
	struct nvme_tcp_pdu *pdu;
	int rc;
	struct nvme_tcp_poll_group *group;

	if (tqpair->needs_poll) {
		group = nvme_tcp_poll_group(qpair->poll_group);
		TAILQ_REMOVE(&group->needs_poll, tqpair, link);
		tqpair->needs_poll = false;
	}

	rc = spdk_sock_close(&tqpair->sock);

	if (tqpair->sock != NULL) {
		SPDK_ERRLOG("tqpair=%p, errno=%d, rc=%d\n", tqpair, errno, rc);
		/* Set it to NULL manually */
		tqpair->sock = NULL;
	}

	/* clear the send_queue */
	while (!TAILQ_EMPTY(&tqpair->send_queue)) {
		pdu = TAILQ_FIRST(&tqpair->send_queue);
		/* Remove the pdu from the send_queue so that it cannot erroneously
		 * be sent out on the next connection attempt
		 */
		TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
	}

	nvme_tcp_qpair_abort_reqs(qpair, 0);
	nvme_transport_ctrlr_disconnect_qpair_done(qpair);
}

static int
nvme_tcp_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	struct nvme_tcp_qpair *tqpair;

	assert(qpair != NULL);
	nvme_tcp_qpair_abort_reqs(qpair, 0);
	nvme_qpair_deinit(qpair);
	tqpair = nvme_tcp_qpair(qpair);
	nvme_tcp_free_reqs(tqpair);
	if (!tqpair->shared_stats) {
		free(tqpair->stats);
	}
	free(tqpair);

	return 0;
}

static int
nvme_tcp_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

static int
nvme_tcp_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_tcp_ctrlr *tctrlr = nvme_tcp_ctrlr(ctrlr);

	if (ctrlr->adminq) {
		nvme_tcp_ctrlr_delete_io_qpair(ctrlr, ctrlr->adminq);
	}

	nvme_ctrlr_destruct_finish(ctrlr);

	free(tctrlr);

	return 0;
}

static void
_pdu_write_done(void *cb_arg, int err)
{
	struct nvme_tcp_pdu *pdu = cb_arg;
	struct nvme_tcp_qpair *tqpair = pdu->qpair;
	struct nvme_tcp_poll_group *pgroup;

	/* If there are queued requests, we assume they are queued because they are waiting
	 * for resources to be released. Those resources are almost certainly released in
	 * response to a PDU completing here. However, to attempt to make forward progress
	 * the qpair needs to be polled and we can't rely on another network event to make
	 * that happen. Add it to a list of qpairs to poll regardless of network activity
	 * here.
	 * Similarly, when the tqpair state is NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL or
	 * NVME_TCP_QPAIR_STATE_INITIALIZING, we need to add it to the needs_poll list as
	 * well, to make forward progress in case the resources are released after the
	 * icreq's or CONNECT's response is processed. */
	if (tqpair->qpair.poll_group && !tqpair->needs_poll && (!STAILQ_EMPTY(&tqpair->qpair.queued_req) ||
			tqpair->state == NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL ||
			tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING)) {
		pgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);

		TAILQ_INSERT_TAIL(&pgroup->needs_poll, tqpair, link);
		tqpair->needs_poll = true;
	}

	TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);

	if (err != 0) {
		nvme_transport_ctrlr_disconnect_qpair(tqpair->qpair.ctrlr, &tqpair->qpair);
		return;
	}

	assert(pdu->cb_fn != NULL);
	pdu->cb_fn(pdu->cb_arg);
}

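/* Build the iovec array for the PDU (header, optional digests, and data),
 * queue it on the qpair's send_queue and kick off an asynchronous writev.
 * _pdu_write_done() runs once the socket layer is finished with the buffers. */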
static void
_tcp_write_pdu(struct nvme_tcp_pdu *pdu)
{
	uint32_t mapped_length = 0;
	struct nvme_tcp_qpair *tqpair = pdu->qpair;

	pdu->sock_req.iovcnt = nvme_tcp_build_iovs(pdu->iov, SPDK_COUNTOF(pdu->iov), pdu,
			       (bool)tqpair->flags.host_hdgst_enable, (bool)tqpair->flags.host_ddgst_enable,
			       &mapped_length);
	TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
	if (spdk_unlikely(mapped_length < pdu->data_len)) {
		SPDK_ERRLOG("could not map the whole %u bytes (mapped only %u bytes)\n", pdu->data_len,
			    mapped_length);
		_pdu_write_done(pdu, -EINVAL);
		return;
	}
	pdu->sock_req.cb_fn = _pdu_write_done;
	pdu->sock_req.cb_arg = pdu;
	tqpair->stats->submitted_requests++;
	spdk_sock_writev_async(tqpair->sock, &pdu->sock_req);
}

static void
data_crc32_accel_done(void *cb_arg, int status)
{
	struct nvme_tcp_pdu *pdu = cb_arg;

	if (spdk_unlikely(status)) {
		SPDK_ERRLOG("Failed to compute the data digest for pdu=%p\n", pdu);
		_pdu_write_done(pdu, status);
		return;
	}

	pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
	MAKE_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);

	_tcp_write_pdu(pdu);
}

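/* Compute the data digest for a PDU that is about to be sent. When the poll
 * group provides an accel crc32c function and the payload is digest-aligned,
 * the CRC is offloaded and the write continues from data_crc32_accel_done();
 * otherwise the digest is calculated synchronously before writing. */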
static void
pdu_data_crc32_compute(struct nvme_tcp_pdu *pdu)
{
	struct nvme_tcp_qpair *tqpair = pdu->qpair;
	uint32_t crc32c;
	struct nvme_tcp_poll_group *tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);

	/* Data Digest */
	if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] &&
	    tqpair->flags.host_ddgst_enable) {
		/* Only support this limited case for the first step */
		if ((nvme_qpair_get_state(&tqpair->qpair) >= NVME_QPAIR_CONNECTED) &&
		    (tgroup != NULL && tgroup->group.group->accel_fn_table.submit_accel_crc32c) &&
		    spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0))) {
			tgroup->group.group->accel_fn_table.submit_accel_crc32c(tgroup->group.group->ctx,
					&pdu->data_digest_crc32, pdu->data_iov,
					pdu->data_iovcnt, 0, data_crc32_accel_done, pdu);
			return;
		}

		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
		crc32c = crc32c ^ SPDK_CRC32C_XOR;
		MAKE_DIGEST_WORD(pdu->data_digest, crc32c);
	}

	_tcp_write_pdu(pdu);
}

static int
nvme_tcp_qpair_write_pdu(struct nvme_tcp_qpair *tqpair,
			 struct nvme_tcp_pdu *pdu,
			 nvme_tcp_qpair_xfer_complete_cb cb_fn,
			 void *cb_arg)
{
	int hlen;
	uint32_t crc32c;

	hlen = pdu->hdr.common.hlen;
	pdu->cb_fn = cb_fn;
	pdu->cb_arg = cb_arg;
	pdu->qpair = tqpair;

	/* Header Digest */
	if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && tqpair->flags.host_hdgst_enable) {
		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
		MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + hlen, crc32c);
	}

	pdu_data_crc32_compute(pdu);

	return 0;
}

/*
 * Build SGL describing contiguous payload buffer.
 */
static int
nvme_tcp_build_contig_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
{
	struct nvme_request *req = tcp_req->req;

	/* ubsan complains about applying zero offset to null pointer if contig_or_cb_arg is NULL,
	 * so just double cast it to make it go away */
	tcp_req->iov[0].iov_base = (void *)((uintptr_t)req->payload.contig_or_cb_arg + req->payload_offset);
	tcp_req->iov[0].iov_len = req->payload_size;
	tcp_req->iovcnt = 1;

	SPDK_DEBUGLOG(nvme, "enter\n");

	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);

	return 0;
}

/*
 * Build SGL describing scattered payload buffer.
 */
static int
nvme_tcp_build_sgl_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
{
	int rc;
	uint32_t length, remaining_size, iovcnt = 0, max_num_sgl;
	struct nvme_request *req = tcp_req->req;

	SPDK_DEBUGLOG(nvme, "enter\n");

	assert(req->payload_size != 0);
	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
	assert(req->payload.reset_sgl_fn != NULL);
	assert(req->payload.next_sge_fn != NULL);
	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);

	max_num_sgl = spdk_min(req->qpair->ctrlr->max_sges, NVME_TCP_MAX_SGL_DESCRIPTORS);
	remaining_size = req->payload_size;

	do {
		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &tcp_req->iov[iovcnt].iov_base,
					      &length);
		if (rc) {
			return -1;
		}

		length = spdk_min(length, remaining_size);
		tcp_req->iov[iovcnt].iov_len = length;
		remaining_size -= length;
		iovcnt++;
	} while (remaining_size > 0 && iovcnt < max_num_sgl);

	/* Should be impossible if we did our sgl checks properly up the stack, but do a sanity check here. */
	if (remaining_size > 0) {
		SPDK_ERRLOG("Failed to construct tcp_req=%p, and the iovcnt=%u, remaining_size=%u\n",
			    tcp_req, iovcnt, remaining_size);
		return -1;
	}

	tcp_req->iovcnt = iovcnt;

	return 0;
}

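/* Prepare the NVMe command for transport over TCP: assign the transport cid,
 * describe the payload with a transport SGL, build the iovecs, and decide
 * whether host-to-controller data fits in the capsule (in-capsule data). */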
static int
nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
		  struct nvme_tcp_req *tcp_req)
{
	struct spdk_nvme_ctrlr *ctrlr = tqpair->qpair.ctrlr;
	int rc = 0;
	enum spdk_nvme_data_transfer xfer;
	uint32_t max_in_capsule_data_size;

	tcp_req->req = req;
	req->cmd.cid = tcp_req->cid;
	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	req->cmd.dptr.sgl1.unkeyed.length = req->payload_size;

	if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG) {
		rc = nvme_tcp_build_contig_request(tqpair, tcp_req);
	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL) {
		rc = nvme_tcp_build_sgl_request(tqpair, tcp_req);
	} else {
		rc = -1;
	}

	if (rc) {
		return rc;
	}

	if (req->cmd.opc == SPDK_NVME_OPC_FABRIC) {
		struct spdk_nvmf_capsule_cmd *nvmf_cmd = (struct spdk_nvmf_capsule_cmd *)&req->cmd;

		xfer = spdk_nvme_opc_get_data_transfer(nvmf_cmd->fctype);
	} else {
		xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
	}
	if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		max_in_capsule_data_size = ctrlr->ioccsz_bytes;
		if ((req->cmd.opc == SPDK_NVME_OPC_FABRIC) || nvme_qpair_is_admin_queue(&tqpair->qpair)) {
			max_in_capsule_data_size = SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE;
		}

		if (req->payload_size <= max_in_capsule_data_size) {
			req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
			req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
			req->cmd.dptr.sgl1.address = 0;
			tcp_req->in_capsule_data = true;
		}
	}

	return 0;
}

static inline bool
nvme_tcp_req_complete_safe(struct nvme_tcp_req *tcp_req)
{
	if (!(tcp_req->ordering.bits.send_ack && tcp_req->ordering.bits.data_recv)) {
		return false;
	}

	assert(tcp_req->state == NVME_TCP_REQ_ACTIVE);
	assert(tcp_req->tqpair != NULL);
	assert(tcp_req->req != NULL);

	SPDK_DEBUGLOG(nvme, "complete tcp_req(%p) on tqpair=%p\n", tcp_req, tcp_req->tqpair);

	if (!tcp_req->tqpair->qpair.in_completion_context) {
		tcp_req->tqpair->async_complete++;
	}

	nvme_tcp_req_complete(tcp_req, tcp_req->tqpair, &tcp_req->rsp, true);
	return true;
}

static void
nvme_tcp_qpair_cmd_send_complete(void *cb_arg)
{
	struct nvme_tcp_req *tcp_req = cb_arg;

	SPDK_DEBUGLOG(nvme, "tcp req %p, cid %u, qid %u\n", tcp_req, tcp_req->cid,
		      tcp_req->tqpair->qpair.id);
	tcp_req->ordering.bits.send_ack = 1;
	/* Handle the R2T case */
	if (spdk_unlikely(tcp_req->ordering.bits.h2c_send_waiting_ack)) {
		SPDK_DEBUGLOG(nvme, "tcp req %p, send H2C data\n", tcp_req);
		nvme_tcp_send_h2c_data(tcp_req);
	} else {
		nvme_tcp_req_complete_safe(tcp_req);
	}
}

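/* Build and send a CapsuleCmd PDU for the request: copy in the SQE, apply
 * optional header/data digests, and, for in-capsule data, pad the payload
 * offset to the controller's CPDA alignment before attaching the buffer. */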
static int
nvme_tcp_qpair_capsule_cmd_send(struct nvme_tcp_qpair *tqpair,
				struct nvme_tcp_req *tcp_req)
{
	struct nvme_tcp_pdu *pdu;
	struct spdk_nvme_tcp_cmd *capsule_cmd;
	uint32_t plen = 0, alignment;
	uint8_t pdo;

	SPDK_DEBUGLOG(nvme, "enter\n");
	pdu = tcp_req->pdu;

	capsule_cmd = &pdu->hdr.capsule_cmd;
	capsule_cmd->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	plen = capsule_cmd->common.hlen = sizeof(*capsule_cmd);
	capsule_cmd->ccsqe = tcp_req->req->cmd;

	SPDK_DEBUGLOG(nvme, "capsule_cmd cid=%u on tqpair(%p)\n", tcp_req->req->cmd.cid, tqpair);

	if (tqpair->flags.host_hdgst_enable) {
		SPDK_DEBUGLOG(nvme, "Header digest is enabled for capsule command on tcp_req=%p\n",
			      tcp_req);
		capsule_cmd->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	if ((tcp_req->req->payload_size == 0) || !tcp_req->in_capsule_data) {
		goto end;
	}

	pdo = plen;
	pdu->padding_len = 0;
	if (tqpair->cpda) {
		alignment = (tqpair->cpda + 1) << 2;
		if (alignment > plen) {
			pdu->padding_len = alignment - plen;
			pdo = alignment;
			plen = alignment;
		}
	}

	capsule_cmd->common.pdo = pdo;
	plen += tcp_req->req->payload_size;
	if (tqpair->flags.host_ddgst_enable) {
		capsule_cmd->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	tcp_req->datao = 0;
	nvme_tcp_pdu_set_data_buf(pdu, tcp_req->iov, tcp_req->iovcnt,
				  0, tcp_req->req->payload_size);
end:
	capsule_cmd->common.plen = plen;
	return nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_qpair_cmd_send_complete, tcp_req);
}

static int
nvme_tcp_qpair_submit_request(struct spdk_nvme_qpair *qpair,
			      struct nvme_request *req)
{
	struct nvme_tcp_qpair *tqpair;
	struct nvme_tcp_req *tcp_req;

	tqpair = nvme_tcp_qpair(qpair);
	assert(tqpair != NULL);
	assert(req != NULL);

	tcp_req = nvme_tcp_req_get(tqpair);
	if (!tcp_req) {
		tqpair->stats->queued_requests++;
		/* Inform the upper layer to try again later. */
		return -EAGAIN;
	}

	if (nvme_tcp_req_init(tqpair, req, tcp_req)) {
		SPDK_ERRLOG("nvme_tcp_req_init() failed\n");
		nvme_tcp_req_put(tqpair, tcp_req);
		return -1;
	}

	spdk_trace_record(TRACE_NVME_TCP_SUBMIT, qpair->id, 0, (uintptr_t)req, req->cb_arg,
			  (uint32_t)req->cmd.cid, (uint32_t)req->cmd.opc,
			  req->cmd.cdw10, req->cmd.cdw11, req->cmd.cdw12);
	TAILQ_INSERT_TAIL(&tqpair->outstanding_reqs, tcp_req, link);
	return nvme_tcp_qpair_capsule_cmd_send(tqpair, tcp_req);
}

static int
nvme_tcp_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

static void
nvme_tcp_req_complete(struct nvme_tcp_req *tcp_req,
		      struct nvme_tcp_qpair *tqpair,
		      struct spdk_nvme_cpl *rsp,
		      bool print_on_error)
{
	struct spdk_nvme_cpl	cpl;
	spdk_nvme_cmd_cb	user_cb;
	void			*user_cb_arg;
	struct spdk_nvme_qpair	*qpair;
	struct nvme_request	*req;
	bool			error, print_error;

	assert(tcp_req->req != NULL);
	req = tcp_req->req;

	/* Cache arguments to be passed to nvme_complete_request since tcp_req can be zeroed when released */
	memcpy(&cpl, rsp, sizeof(cpl));
	user_cb		= req->cb_fn;
	user_cb_arg	= req->cb_arg;
	qpair		= req->qpair;

	error = spdk_nvme_cpl_is_error(rsp);
	print_error = error && print_on_error && !qpair->ctrlr->opts.disable_error_logging;

	if (print_error) {
		spdk_nvme_qpair_print_command(qpair, &req->cmd);
	}

	if (print_error || SPDK_DEBUGLOG_FLAG_ENABLED("nvme")) {
		spdk_nvme_qpair_print_completion(qpair, rsp);
	}

	spdk_trace_record(TRACE_NVME_TCP_COMPLETE, qpair->id, 0, (uintptr_t)req, req->cb_arg,
			  (uint32_t)req->cmd.cid, (uint32_t)cpl.status_raw);
	TAILQ_REMOVE(&tcp_req->tqpair->outstanding_reqs, tcp_req, link);
	nvme_tcp_req_put(tqpair, tcp_req);
	nvme_free_request(req);
	nvme_complete_request(user_cb, user_cb_arg, qpair, req, &cpl);
}

static void
nvme_tcp_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	struct nvme_tcp_req *tcp_req, *tmp;
	struct spdk_nvme_cpl cpl = {};
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);

	cpl.sqid = qpair->id;
	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.dnr = dnr;

	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
		nvme_tcp_req_complete(tcp_req, tqpair, &cpl, true);
	}
}

static inline void
nvme_tcp_qpair_set_recv_state(struct nvme_tcp_qpair *tqpair,
			      enum nvme_tcp_pdu_recv_state state)
{
	if (tqpair->recv_state == state) {
		SPDK_ERRLOG("The recv state of tqpair=%p is already the state (%d) to be set\n",
			    tqpair, state);
		return;
	}

	if (state == NVME_TCP_PDU_RECV_STATE_ERROR) {
		assert(TAILQ_EMPTY(&tqpair->outstanding_reqs));
	}

	tqpair->recv_state = state;
}

static void
nvme_tcp_qpair_send_h2c_term_req_complete(void *cb_arg)
{
	struct nvme_tcp_qpair *tqpair = cb_arg;

	tqpair->state = NVME_TCP_QPAIR_STATE_EXITING;
}

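/* Send an H2CTermReq PDU in response to a fatal protocol error: record the
 * fatal error status (and field offset, when applicable), echo back up to
 * the maximum allowed bytes of the offending PDU header, and quiesce the
 * receive path. */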
846 nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
847 				 enum spdk_nvme_tcp_term_req_fes fes, uint32_t error_offset)
848 {
849 	struct nvme_tcp_pdu *rsp_pdu;
850 	struct spdk_nvme_tcp_term_req_hdr *h2c_term_req;
851 	uint32_t h2c_term_req_hdr_len = sizeof(*h2c_term_req);
852 	uint8_t copy_len;
853 
854 	rsp_pdu = tqpair->send_pdu;
855 	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
856 	h2c_term_req = &rsp_pdu->hdr.term_req;
857 	h2c_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
858 	h2c_term_req->common.hlen = h2c_term_req_hdr_len;
859 
860 	if ((fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
861 	    (fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
862 		DSET32(&h2c_term_req->fei, error_offset);
863 	}
864 
865 	copy_len = pdu->hdr.common.hlen;
866 	if (copy_len > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE) {
867 		copy_len = SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE;
868 	}
869 
870 	/* Copy the error info into the buffer */
871 	memcpy((uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len, pdu->hdr.raw, copy_len);
872 	nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len, copy_len);
873 
874 	/* Contain the header len of the wrong received pdu */
875 	h2c_term_req->common.plen = h2c_term_req->common.hlen + copy_len;
876 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
877 	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_send_h2c_term_req_complete, tqpair);
878 }
879 
880 static bool
881 nvme_tcp_qpair_recv_state_valid(struct nvme_tcp_qpair *tqpair)
882 {
883 	switch (tqpair->state) {
884 	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND:
885 	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL:
886 	case NVME_TCP_QPAIR_STATE_RUNNING:
887 		return true;
888 	default:
889 		return false;
890 	}
891 }
892 
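/* Validate the common header of a freshly received PDU: check that the PDU
 * type is legal for the current qpair state and that hlen/plen are sane for
 * that type. On success move on to reading the PDU-specific header (PSH);
 * on failure send an H2CTermReq identifying the bad field. */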
static void
nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
{
	struct nvme_tcp_pdu *pdu;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;
	uint32_t expected_hlen, hd_len = 0;
	bool plen_error = false;

	pdu = tqpair->recv_pdu;

	SPDK_DEBUGLOG(nvme, "pdu type = %d\n", pdu->hdr.common.pdu_type);
	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP) {
		if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
			SPDK_ERRLOG("Already received IC_RESP PDU, and we should reject this pdu=%p\n", pdu);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
			goto err;
		}
		expected_hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
		if (pdu->hdr.common.plen != expected_hlen) {
			plen_error = true;
		}
	} else {
		if (spdk_unlikely(!nvme_tcp_qpair_recv_state_valid(tqpair))) {
			SPDK_ERRLOG("The TCP/IP tqpair connection is not negotiated\n");
			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
			goto err;
		}

		switch (pdu->hdr.common.pdu_type) {
		case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
			expected_hlen = sizeof(struct spdk_nvme_tcp_rsp);
			if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
				hd_len = SPDK_NVME_TCP_DIGEST_LEN;
			}

			if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
				plen_error = true;
			}
			break;
		case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
			expected_hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
			if (pdu->hdr.common.plen < pdu->hdr.common.pdo) {
				plen_error = true;
			}
			break;
		case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
			expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
			if ((pdu->hdr.common.plen <= expected_hlen) ||
			    (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
				plen_error = true;
			}
			break;
		case SPDK_NVME_TCP_PDU_TYPE_R2T:
			expected_hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
			if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
				hd_len = SPDK_NVME_TCP_DIGEST_LEN;
			}

			if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
				plen_error = true;
			}
			break;

		default:
			SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu->hdr.common.pdu_type);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
			error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type);
			goto err;
		}
	}

	if (pdu->hdr.common.hlen != expected_hlen) {
		SPDK_ERRLOG("Expected PDU header length %u, got %u\n",
			    expected_hlen, pdu->hdr.common.hlen);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen);
		goto err;
	} else if (plen_error) {
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen);
		goto err;
	} else {
		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
		nvme_tcp_pdu_calc_psh_len(tqpair->recv_pdu, tqpair->flags.host_hdgst_enable);
		return;
	}
err:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static struct nvme_tcp_req *
get_nvme_active_req_by_cid(struct nvme_tcp_qpair *tqpair, uint32_t cid)
{
	assert(tqpair != NULL);
	if ((cid >= tqpair->num_entries) || (tqpair->tcp_reqs[cid].state == NVME_TCP_REQ_FREE)) {
		return NULL;
	}

	return &tqpair->tcp_reqs[cid];
}

static void
nvme_tcp_c2h_data_payload_handle(struct nvme_tcp_qpair *tqpair,
				 struct nvme_tcp_pdu *pdu, uint32_t *reaped)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
	uint8_t flags;

	tcp_req = pdu->req;
	assert(tcp_req != NULL);

	SPDK_DEBUGLOG(nvme, "enter\n");
	c2h_data = &pdu->hdr.c2h_data;
	tcp_req->datao += pdu->data_len;
	flags = c2h_data->common.flags;

	if (flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU) {
		if (tcp_req->datao == tcp_req->req->payload_size) {
			tcp_req->rsp.status.p = 0;
		} else {
			tcp_req->rsp.status.p = 1;
		}

		tcp_req->rsp.cid = tcp_req->cid;
		tcp_req->rsp.sqid = tqpair->qpair.id;
		if (flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) {
			tcp_req->ordering.bits.data_recv = 1;
			if (nvme_tcp_req_complete_safe(tcp_req)) {
				(*reaped)++;
			}
		}
	}
}

static const char *spdk_nvme_tcp_term_req_fes_str[] = {
	"Invalid PDU Header Field",
	"PDU Sequence Error",
	"Header Digest Error",
	"Data Transfer Out of Range",
	"Data Transfer Limit Exceeded",
	"Unsupported parameter",
};

static void
nvme_tcp_c2h_term_req_dump(struct spdk_nvme_tcp_term_req_hdr *c2h_term_req)
{
	SPDK_ERRLOG("Error info of pdu(%p): %s\n", c2h_term_req,
		    spdk_nvme_tcp_term_req_fes_str[c2h_term_req->fes]);
	if ((c2h_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
	    (c2h_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
		SPDK_DEBUGLOG(nvme, "The offset from the start of the PDU header is %u\n",
			      DGET32(c2h_term_req->fei));
	}
	/* we may also need to dump some other info here */
}

static void
nvme_tcp_c2h_term_req_payload_handle(struct nvme_tcp_qpair *tqpair,
				     struct nvme_tcp_pdu *pdu)
{
	nvme_tcp_c2h_term_req_dump(&pdu->hdr.term_req);
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
}

static void
_nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
{
	struct nvme_tcp_pdu *pdu;

	assert(tqpair != NULL);
	pdu = tqpair->recv_pdu;

	switch (pdu->hdr.common.pdu_type) {
	case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
		nvme_tcp_c2h_data_payload_handle(tqpair, pdu, reaped);
		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
		break;

	case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
		nvme_tcp_c2h_term_req_payload_handle(tqpair, pdu);
		break;

	default:
		/* The code should never reach here */
		SPDK_ERRLOG("The code should never reach here\n");
		break;
	}
}

static void
tcp_data_recv_crc32_done(void *cb_arg, int status)
{
	struct nvme_tcp_req *tcp_req = cb_arg;
	struct nvme_tcp_pdu *pdu;
	struct nvme_tcp_qpair *tqpair;
	int rc;
	struct nvme_tcp_poll_group *pgroup;
	int dummy_reaped = 0;

	pdu = tcp_req->pdu;
	assert(pdu != NULL);

	tqpair = tcp_req->tqpair;
	assert(tqpair != NULL);

	if (tqpair->qpair.poll_group && !tqpair->needs_poll) {
		pgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
		TAILQ_INSERT_TAIL(&pgroup->needs_poll, tqpair, link);
		tqpair->needs_poll = true;
	}

	if (spdk_unlikely(status)) {
		SPDK_ERRLOG("Failed to compute the data digest for pdu=%p\n", pdu);
		tcp_req->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
		goto end;
	}

	pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
	rc = MATCH_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);
	if (rc == 0) {
		SPDK_ERRLOG("data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
		tcp_req->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
	}

end:
	nvme_tcp_c2h_data_payload_handle(tqpair, tcp_req->pdu, &dummy_reaped);
}

static void
nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
			    uint32_t *reaped)
{
	int rc = 0;
	struct nvme_tcp_pdu *pdu = tqpair->recv_pdu;
	uint32_t crc32c;
	struct nvme_tcp_poll_group *tgroup;
	struct nvme_tcp_req *tcp_req = pdu->req;

	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	SPDK_DEBUGLOG(nvme, "enter\n");

	/* The request can be NULL, e.g. in case of C2HTermReq */
	if (spdk_likely(tcp_req != NULL)) {
		tcp_req->expected_datao += pdu->data_len;
	}

	/* Check the data digest if needed */
	if (pdu->ddgst_enable) {
		/* If the data digest is enabled, tcp_req cannot be NULL */
		assert(tcp_req != NULL);
		tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
		/* Only support the limited case where the request has a single c2h pdu */
		if ((nvme_qpair_get_state(&tqpair->qpair) >= NVME_QPAIR_CONNECTED) &&
		    (tgroup != NULL && tgroup->group.group->accel_fn_table.submit_accel_crc32c) &&
		    spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0)
				&& tcp_req->req->payload_size == pdu->data_len)) {
			tcp_req->pdu->hdr = pdu->hdr;
			tcp_req->pdu->req = tcp_req;
			memcpy(tcp_req->pdu->data_digest, pdu->data_digest, sizeof(pdu->data_digest));
			memcpy(tcp_req->pdu->data_iov, pdu->data_iov, sizeof(pdu->data_iov[0]) * pdu->data_iovcnt);
			tcp_req->pdu->data_iovcnt = pdu->data_iovcnt;
			tcp_req->pdu->data_len = pdu->data_len;

			nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
			tgroup->group.group->accel_fn_table.submit_accel_crc32c(tgroup->group.group->ctx,
					&tcp_req->pdu->data_digest_crc32, tcp_req->pdu->data_iov,
					tcp_req->pdu->data_iovcnt, 0, tcp_data_recv_crc32_done, tcp_req);
			return;
		}

		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
		crc32c = crc32c ^ SPDK_CRC32C_XOR;
		rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
		if (rc == 0) {
			SPDK_ERRLOG("data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
			tcp_req = pdu->req;
			assert(tcp_req != NULL);
			tcp_req->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
		}
	}

	_nvme_tcp_pdu_payload_handle(tqpair, reaped);
}

static void
nvme_tcp_send_icreq_complete(void *cb_arg)
{
	struct nvme_tcp_qpair *tqpair = cb_arg;

	SPDK_DEBUGLOG(nvme, "Complete the icreq send for tqpair=%p %u\n", tqpair, tqpair->qpair.id);

	tqpair->flags.icreq_send_ack = true;

	if (tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING) {
		SPDK_DEBUGLOG(nvme, "tqpair %p %u, finalize icresp\n", tqpair, tqpair->qpair.id);
		tqpair->state = NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND;
	}
}

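/* Handle the ICResp PDU that completes transport-level negotiation: validate
 * pfv/maxh2cdata/cpda, latch the negotiated digest settings, resize the
 * socket receive buffer accordingly, and advance the qpair state once the
 * icreq send has also been acked. */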
static void
nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair,
		       struct nvme_tcp_pdu *pdu)
{
	struct spdk_nvme_tcp_ic_resp *ic_resp = &pdu->hdr.ic_resp;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;
	int recv_buf_size;

	/* Only PFV 0 is defined currently */
	if (ic_resp->pfv != 0) {
		SPDK_ERRLOG("Expected ICResp PFV %u, got %u\n", 0u, ic_resp->pfv);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, pfv);
		goto end;
	}

	if (ic_resp->maxh2cdata < NVME_TCP_PDU_H2C_MIN_DATA_SIZE) {
		SPDK_ERRLOG("Expected ICResp maxh2cdata >=%u, got %u\n", NVME_TCP_PDU_H2C_MIN_DATA_SIZE,
			    ic_resp->maxh2cdata);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, maxh2cdata);
		goto end;
	}
	tqpair->maxh2cdata = ic_resp->maxh2cdata;

	if (ic_resp->cpda > SPDK_NVME_TCP_CPDA_MAX) {
		SPDK_ERRLOG("Expected ICResp cpda <=%u, got %u\n", SPDK_NVME_TCP_CPDA_MAX, ic_resp->cpda);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, cpda);
		goto end;
	}
	tqpair->cpda = ic_resp->cpda;

	tqpair->flags.host_hdgst_enable = ic_resp->dgst.bits.hdgst_enable ? true : false;
	tqpair->flags.host_ddgst_enable = ic_resp->dgst.bits.ddgst_enable ? true : false;
	SPDK_DEBUGLOG(nvme, "host_hdgst_enable: %u\n", tqpair->flags.host_hdgst_enable);
	SPDK_DEBUGLOG(nvme, "host_ddgst_enable: %u\n", tqpair->flags.host_ddgst_enable);

	/* Now that we know whether digests are enabled, properly size the receive buffer to
	 * handle several incoming 4K read commands according to the
	 * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR parameter. */
	recv_buf_size = 0x1000 + sizeof(struct spdk_nvme_tcp_c2h_data_hdr);

	if (tqpair->flags.host_hdgst_enable) {
		recv_buf_size += SPDK_NVME_TCP_DIGEST_LEN;
	}

	if (tqpair->flags.host_ddgst_enable) {
		recv_buf_size += SPDK_NVME_TCP_DIGEST_LEN;
	}

	if (spdk_sock_set_recvbuf(tqpair->sock, recv_buf_size * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR) < 0) {
		SPDK_WARNLOG("Unable to allocate enough memory for receive buffer on tqpair=%p with size=%d\n",
			     tqpair,
			     recv_buf_size);
		/* Not fatal. */
	}

	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);

	if (!tqpair->flags.icreq_send_ack) {
		tqpair->state = NVME_TCP_QPAIR_STATE_INITIALIZING;
		SPDK_DEBUGLOG(nvme, "tqpair %p %u, waiting icreq ack\n", tqpair, tqpair->qpair.id);
		return;
	}

	tqpair->state = NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND;
	return;
end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_capsule_resp_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
				 uint32_t *reaped)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_rsp *capsule_resp = &pdu->hdr.capsule_resp;
	uint32_t cid, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	SPDK_DEBUGLOG(nvme, "enter\n");
	cid = capsule_resp->rccqe.cid;
	tcp_req = get_nvme_active_req_by_cid(tqpair, cid);

	if (!tcp_req) {
		SPDK_ERRLOG("no tcp_req is found with cid=%u for tqpair=%p\n", cid, tqpair);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_rsp, rccqe);
		goto end;
	}

	assert(tcp_req->req != NULL);

	tcp_req->rsp = capsule_resp->rccqe;
	tcp_req->ordering.bits.data_recv = 1;

	/* Get ready to receive the next PDU */
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);

	if (nvme_tcp_req_complete_safe(tcp_req)) {
		(*reaped)++;
	}

	return;

end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_c2h_term_req_hdr_handle(struct nvme_tcp_qpair *tqpair,
				 struct nvme_tcp_pdu *pdu)
{
	struct spdk_nvme_tcp_term_req_hdr *c2h_term_req = &pdu->hdr.term_req;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	if (c2h_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) {
		SPDK_ERRLOG("Fatal Error Status(FES) is unknown for c2h_term_req pdu=%p\n", pdu);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes);
		goto end;
	}

	/* set the data buffer */
	nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + c2h_term_req->common.hlen,
			      c2h_term_req->common.plen - c2h_term_req->common.hlen);
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	return;
end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data = &pdu->hdr.c2h_data;
	uint32_t error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;
	int flags = c2h_data->common.flags;

	SPDK_DEBUGLOG(nvme, "enter\n");
	SPDK_DEBUGLOG(nvme, "c2h_data info on tqpair(%p): datao=%u, datal=%u, cccid=%d\n",
		      tqpair, c2h_data->datao, c2h_data->datal, c2h_data->cccid);
	tcp_req = get_nvme_active_req_by_cid(tqpair, c2h_data->cccid);
	if (!tcp_req) {
		SPDK_ERRLOG("no tcp_req found for c2hdata cid=%d\n", c2h_data->cccid);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, cccid);
		goto end;
	}

	SPDK_DEBUGLOG(nvme, "tcp_req(%p) on tqpair(%p): expected_datao=%u, payload_size=%u\n",
		      tcp_req, tqpair, tcp_req->expected_datao, tcp_req->req->payload_size);

	if (spdk_unlikely((flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) &&
			  !(flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU))) {
		SPDK_ERRLOG("Invalid flag flags=%d in c2h_data=%p\n", flags, c2h_data);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, common);
		goto end;
	}

	if (c2h_data->datal > tcp_req->req->payload_size) {
		SPDK_ERRLOG("Invalid datal for tcp_req(%p), datal(%u) exceeds payload_size(%u)\n",
			    tcp_req, c2h_data->datal, tcp_req->req->payload_size);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
		goto end;
	}

	if (tcp_req->expected_datao != c2h_data->datao) {
		SPDK_ERRLOG("Invalid datao for tcp_req(%p), received datao(%u) != expected datao(%u) in tcp_req\n",
			    tcp_req, c2h_data->datao, tcp_req->expected_datao);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datao);
		goto end;
	}

	if ((c2h_data->datao + c2h_data->datal) > tcp_req->req->payload_size) {
		SPDK_ERRLOG("Invalid data range for tcp_req(%p), received (datao(%u) + datal(%u)) > payload_size(%u) in tcp_req\n",
			    tcp_req, c2h_data->datao, c2h_data->datal, tcp_req->req->payload_size);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datal);
		goto end;
	}

	nvme_tcp_pdu_set_data_buf(pdu, tcp_req->iov, tcp_req->iovcnt,
				  c2h_data->datao, c2h_data->datal);
	pdu->req = tcp_req;

	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	return;

end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_qpair_h2c_data_send_complete(void *cb_arg)
{
	struct nvme_tcp_req *tcp_req = cb_arg;

	assert(tcp_req != NULL);

	tcp_req->ordering.bits.send_ack = 1;
	if (tcp_req->r2tl_remain) {
		nvme_tcp_send_h2c_data(tcp_req);
	} else {
		assert(tcp_req->active_r2ts > 0);
		tcp_req->active_r2ts--;
		tcp_req->state = NVME_TCP_REQ_ACTIVE;

		if (tcp_req->ordering.bits.r2t_waiting_h2c_complete) {
			tcp_req->ordering.bits.r2t_waiting_h2c_complete = 0;
			SPDK_DEBUGLOG(nvme, "tcp_req %p: continue r2t\n", tcp_req);
			assert(tcp_req->active_r2ts > 0);
			tcp_req->ttag = tcp_req->ttag_r2t_next;
			tcp_req->r2tl_remain = tcp_req->r2tl_remain_next;
			tcp_req->state = NVME_TCP_REQ_ACTIVE_R2T;
			nvme_tcp_send_h2c_data(tcp_req);
			return;
		}

		/* We also need to call this function here to free the resources */
		nvme_tcp_req_complete_safe(tcp_req);
	}
}

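/* Build and send a single H2CData PDU for the current R2T, transferring at
 * most maxh2cdata bytes per PDU and applying CPDA padding and optional
 * digests. The send-complete callback re-enters this function until the
 * remaining R2T length (r2tl_remain) is exhausted. */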
static void
nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req)
{
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(tcp_req->req->qpair);
	struct nvme_tcp_pdu *rsp_pdu;
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
	uint32_t plen, pdo, alignment;

	/* Reinit the send_ack and h2c_send_waiting_ack bits */
	tcp_req->ordering.bits.send_ack = 0;
	tcp_req->ordering.bits.h2c_send_waiting_ack = 0;
	rsp_pdu = tcp_req->pdu;
	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
	h2c_data = &rsp_pdu->hdr.h2c_data;

	h2c_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	plen = h2c_data->common.hlen = sizeof(*h2c_data);
	h2c_data->cccid = tcp_req->cid;
	h2c_data->ttag = tcp_req->ttag;
	h2c_data->datao = tcp_req->datao;

	h2c_data->datal = spdk_min(tcp_req->r2tl_remain, tqpair->maxh2cdata);
	nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->iov, tcp_req->iovcnt,
				  h2c_data->datao, h2c_data->datal);
	tcp_req->r2tl_remain -= h2c_data->datal;

	if (tqpair->flags.host_hdgst_enable) {
		h2c_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	rsp_pdu->padding_len = 0;
	pdo = plen;
	if (tqpair->cpda) {
		alignment = (tqpair->cpda + 1) << 2;
		if (alignment > plen) {
			rsp_pdu->padding_len = alignment - plen;
			pdo = plen = alignment;
		}
	}

	h2c_data->common.pdo = pdo;
	plen += h2c_data->datal;
	if (tqpair->flags.host_ddgst_enable) {
		h2c_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
		plen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	h2c_data->common.plen = plen;
	tcp_req->datao += h2c_data->datal;
	if (!tcp_req->r2tl_remain) {
		h2c_data->common.flags |= SPDK_NVME_TCP_H2C_DATA_FLAGS_LAST_PDU;
	}

	SPDK_DEBUGLOG(nvme, "h2c_data info: datao=%u, datal=%u, pdu_len=%u for tqpair=%p\n",
		      h2c_data->datao, h2c_data->datal, h2c_data->common.plen, tqpair);

	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_h2c_data_send_complete, tcp_req);
}

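/* Handle an incoming R2T PDU: validate the requested offset and length
 * against the request's payload, enforce the negotiated maxr2t limit (with
 * a special case for a subsequent R2T arriving before the previous H2C
 * transfer is acked), then start sending H2C data. */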
static void
nvme_tcp_r2t_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
{
	struct nvme_tcp_req *tcp_req;
	struct spdk_nvme_tcp_r2t_hdr *r2t = &pdu->hdr.r2t;
	uint32_t cid, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	SPDK_DEBUGLOG(nvme, "enter\n");
	cid = r2t->cccid;
	tcp_req = get_nvme_active_req_by_cid(tqpair, cid);
	if (!tcp_req) {
		SPDK_ERRLOG("Cannot find tcp_req for tqpair=%p\n", tqpair);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, cccid);
		goto end;
	}

	SPDK_DEBUGLOG(nvme, "r2t info: r2to=%u, r2tl=%u for tqpair=%p\n", r2t->r2to, r2t->r2tl,
		      tqpair);

	if (tcp_req->state == NVME_TCP_REQ_ACTIVE) {
		assert(tcp_req->active_r2ts == 0);
		tcp_req->state = NVME_TCP_REQ_ACTIVE_R2T;
	}

	if (tcp_req->datao != r2t->r2to) {
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, r2to);
		goto end;
	}

	if ((r2t->r2tl + r2t->r2to) > tcp_req->req->payload_size) {
		SPDK_ERRLOG("Invalid R2T info for tcp_req=%p: (r2to(%u) + r2tl(%u)) exceeds payload_size(%u)\n",
			    tcp_req, r2t->r2to, r2t->r2tl, tcp_req->req->payload_size);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, r2tl);
		goto end;
	}

	tcp_req->active_r2ts++;
	if (spdk_unlikely(tcp_req->active_r2ts > tqpair->maxr2t)) {
		if (tcp_req->state == NVME_TCP_REQ_ACTIVE_R2T && !tcp_req->ordering.bits.send_ack) {
			/* We received a subsequent R2T while we are waiting for the H2C transfer to complete */
			SPDK_DEBUGLOG(nvme, "received a subsequent R2T\n");
			assert(tcp_req->active_r2ts == tqpair->maxr2t + 1);
			tcp_req->ttag_r2t_next = r2t->ttag;
			tcp_req->r2tl_remain_next = r2t->r2tl;
			tcp_req->ordering.bits.r2t_waiting_h2c_complete = 1;
			nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
			return;
		} else {
			fes = SPDK_NVME_TCP_TERM_REQ_FES_R2T_LIMIT_EXCEEDED;
			SPDK_ERRLOG("Invalid R2T: Maximum number of R2T exceeded! Max: %u for tqpair=%p\n", tqpair->maxr2t,
				    tqpair);
			goto end;
		}
	}

	tcp_req->ttag = r2t->ttag;
	tcp_req->r2tl_remain = r2t->r2tl;
	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);

	if (spdk_likely(tcp_req->ordering.bits.send_ack)) {
		nvme_tcp_send_h2c_data(tcp_req);
	} else {
		tcp_req->ordering.bits.h2c_send_waiting_ack = 1;
	}

	return;

end:
	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
}

static void
nvme_tcp_pdu_psh_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
{
	struct nvme_tcp_pdu *pdu;
	int rc;
	uint32_t crc32c, error_offset = 0;
	enum spdk_nvme_tcp_term_req_fes fes;

	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	pdu = tqpair->recv_pdu;

	SPDK_DEBUGLOG(nvme, "enter: pdu type = %u\n", pdu->hdr.common.pdu_type);
	/* check header digest if needed */
	if (pdu->has_hdgst) {
		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
		rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
		if (rc == 0) {
			SPDK_ERRLOG("header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
			nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
			return;
		}
	}

	switch (pdu->hdr.common.pdu_type) {
	case SPDK_NVME_TCP_PDU_TYPE_IC_RESP:
		nvme_tcp_icresp_handle(tqpair, pdu);
		break;
	case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
		nvme_tcp_capsule_resp_hdr_handle(tqpair, pdu, reaped);
		break;
	case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
		nvme_tcp_c2h_data_hdr_handle(tqpair, pdu);
		break;

	case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
		nvme_tcp_c2h_term_req_hdr_handle(tqpair, pdu);
		break;
	case SPDK_NVME_TCP_PDU_TYPE_R2T:
		nvme_tcp_r2t_hdr_handle(tqpair, pdu);
		break;

	default:
		SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu->hdr.common.pdu_type);
		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
		error_offset = 1;
		nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
		break;
	}
}

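/* Receive-side state machine for one qpair. Drives the PDU through
 * READY -> CH -> PSH -> PAYLOAD, looping while state transitions occur
 * back-to-back, and returns NVME_TCP_PDU_IN_PROGRESS when more socket
 * data is required. reaped counts completions produced along the way. */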
1618 static int
1619 nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_completions)
1620 {
1621 	int rc = 0;
1622 	struct nvme_tcp_pdu *pdu;
1623 	uint32_t data_len;
1624 	enum nvme_tcp_pdu_recv_state prev_state;
1625 
1626 	*reaped = tqpair->async_complete;
1627 	tqpair->async_complete = 0;
1628 
1629 	/* The loop here is to allow for several back-to-back state changes. */
1630 	do {
1631 		if (*reaped >= max_completions) {
1632 			break;
1633 		}
1634 
1635 		prev_state = tqpair->recv_state;
1636 		pdu = tqpair->recv_pdu;
1637 		switch (tqpair->recv_state) {
1638 		/* A new PDU is expected: reset the receive buffer. */
1639 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
1640 			memset(pdu, 0, sizeof(struct nvme_tcp_pdu));
1641 			nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
1642 			break;
1643 		/* Wait for the PDU common header */
1644 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
1645 			assert(pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr));
1646 			rc = nvme_tcp_read_data(tqpair->sock,
1647 						sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
1648 						(uint8_t *)&pdu->hdr.common + pdu->ch_valid_bytes);
1649 			if (rc < 0) {
1650 				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
1651 				break;
1652 			}
1653 			pdu->ch_valid_bytes += rc;
1654 			if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
1655 				return NVME_TCP_PDU_IN_PROGRESS;
1656 			}
1657 
1658 			/* The common header of this PDU has now been read from the socket. */
1659 			nvme_tcp_pdu_ch_handle(tqpair);
1660 			break;
1661 		/* Wait for the PDU-specific header */
1662 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
1663 			assert(pdu->psh_valid_bytes < pdu->psh_len);
1664 			rc = nvme_tcp_read_data(tqpair->sock,
1665 						pdu->psh_len - pdu->psh_valid_bytes,
1666 						(uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
1667 			if (rc < 0) {
1668 				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
1669 				break;
1670 			}
1671 
1672 			pdu->psh_valid_bytes += rc;
1673 			if (pdu->psh_valid_bytes < pdu->psh_len) {
1674 				return NVME_TCP_PDU_IN_PROGRESS;
1675 			}
1676 
1677 			/* All headers (CH, PSH, and header digest) of this PDU have now been read from the socket. */
1678 			nvme_tcp_pdu_psh_handle(tqpair, reaped);
1679 			break;
1680 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
1681 			/* If there is no payload data to read, just keep waiting. */
1682 			if (!pdu->data_len) {
1683 				return NVME_TCP_PDU_IN_PROGRESS;
1684 			}
1685 
1686 			data_len = pdu->data_len;
1687 			/* data digest */
1688 			if (spdk_unlikely((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) &&
1689 					  tqpair->flags.host_ddgst_enable)) {
1690 				data_len += SPDK_NVME_TCP_DIGEST_LEN;
1691 				pdu->ddgst_enable = true;
1692 			}
1693 
1694 			rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
1695 			if (rc < 0) {
1696 				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
1697 				break;
1698 			}
1699 
1700 			pdu->rw_offset += rc;
1701 			if (pdu->rw_offset < data_len) {
1702 				return NVME_TCP_PDU_IN_PROGRESS;
1703 			}
1704 
1705 			assert(pdu->rw_offset == data_len);
1706 			/* All of this PDU has now been read from the socket. */
1707 			nvme_tcp_pdu_payload_handle(tqpair, reaped);
1708 			break;
1709 		case NVME_TCP_PDU_RECV_STATE_QUIESCING:
1710 			if (TAILQ_EMPTY(&tqpair->outstanding_reqs)) {
1711 				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
1712 			}
1713 			break;
1714 		case NVME_TCP_PDU_RECV_STATE_ERROR:
1715 			memset(pdu, 0, sizeof(struct nvme_tcp_pdu));
1716 			return NVME_TCP_PDU_FATAL;
1717 		default:
1718 			assert(0);
1719 			break;
1720 		}
1721 	} while (prev_state != tqpair->recv_state);
1722 
1723 	return rc > 0 ? 0 : rc;
1724 }
1725 
1726 static void
1727 nvme_tcp_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
1728 {
1729 	uint64_t t02;
1730 	struct nvme_tcp_req *tcp_req, *tmp;
1731 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
1732 	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
1733 	struct spdk_nvme_ctrlr_process *active_proc;
1734 
1735 	/* Don't check timeouts during controller initialization. */
1736 	if (ctrlr->state != NVME_CTRLR_STATE_READY) {
1737 		return;
1738 	}
1739 
1740 	if (nvme_qpair_is_admin_queue(qpair)) {
1741 		active_proc = nvme_ctrlr_get_current_process(ctrlr);
1742 	} else {
1743 		active_proc = qpair->active_proc;
1744 	}
1745 
1746 	/* Only check timeouts if the current process has a timeout callback. */
1747 	if (active_proc == NULL || active_proc->timeout_cb_fn == NULL) {
1748 		return;
1749 	}
1750 
1751 	t02 = spdk_get_ticks();
1752 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
1753 		assert(tcp_req->req != NULL);
1754 
1755 		if (nvme_request_check_timeout(tcp_req->req, tcp_req->cid, active_proc, t02)) {
1756 			/*
1757 			 * The requests are in order, so as soon as one has not timed out,
1758 			 * stop iterating.
1759 			 */
1760 			break;
1761 		}
1762 	}
1763 }
1764 
1765 static int nvme_tcp_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr,
1766 		struct spdk_nvme_qpair *qpair);
1767 
1768 static int
1769 nvme_tcp_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
1770 {
1771 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
1772 	uint32_t reaped;
1773 	int rc;
1774 
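	/* Without a poll group there is no sock group polling this socket, so flush
	 * any queued writes explicitly before trying to read completions.
	 */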
1775 	if (qpair->poll_group == NULL) {
1776 		rc = spdk_sock_flush(tqpair->sock);
1777 		if (rc < 0 && errno != EAGAIN) {
1778 			SPDK_ERRLOG("Failed to flush tqpair=%p (%d): %s\n", tqpair,
1779 				    errno, spdk_strerror(errno));
1780 			if (spdk_unlikely(tqpair->qpair.ctrlr->timeout_enabled)) {
1781 				nvme_tcp_qpair_check_timeout(qpair);
1782 			}
1783 			goto fail;
1784 		}
1785 	}
1786 
1787 	if (max_completions == 0) {
1788 		max_completions = spdk_max(tqpair->num_entries, 1);
1789 	} else {
1790 		max_completions = spdk_min(max_completions, tqpair->num_entries);
1791 	}
1792 
1793 	reaped = 0;
1794 	rc = nvme_tcp_read_pdu(tqpair, &reaped, max_completions);
1795 	if (rc < 0) {
1796 		SPDK_DEBUGLOG(nvme, "Error polling CQ! (%d): %s\n",
1797 			      errno, spdk_strerror(errno));
1798 		goto fail;
1799 	}
1800 
1801 	if (spdk_unlikely(tqpair->qpair.ctrlr->timeout_enabled)) {
1802 		nvme_tcp_qpair_check_timeout(qpair);
1803 	}
1804 
1805 	if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING)) {
1806 		rc = nvme_tcp_ctrlr_connect_qpair_poll(qpair->ctrlr, qpair);
1807 		if (rc != 0 && rc != -EAGAIN) {
1808 			SPDK_ERRLOG("Failed to connect tqpair=%p\n", tqpair);
1809 			goto fail;
1810 		} else if (rc == 0) {
1811 			/* Once the connection is completed, we can submit queued requests */
1812 			nvme_qpair_resubmit_requests(qpair, tqpair->num_entries);
1813 		}
1814 	}
1815 
1816 	return reaped;
1817 fail:
1818 
1819 	/*
1820 	 * Since admin queues take the ctrlr_lock before entering this function,
1821 	 * we can call nvme_transport_ctrlr_disconnect_qpair. For other qpairs we need
1822 	 * to call the generic function which will take the lock for us.
1823 	 */
1824 	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
1825 
1826 	if (nvme_qpair_is_admin_queue(qpair)) {
1827 		nvme_transport_ctrlr_disconnect_qpair(qpair->ctrlr, qpair);
1828 	} else {
1829 		nvme_ctrlr_disconnect_qpair(qpair);
1830 	}
1831 	return -ENXIO;
1832 }
1833 
1834 static void
1835 nvme_tcp_qpair_sock_cb(void *ctx, struct spdk_sock_group *group, struct spdk_sock *sock)
1836 {
1837 	struct spdk_nvme_qpair *qpair = ctx;
1838 	struct nvme_tcp_poll_group *pgroup = nvme_tcp_poll_group(qpair->poll_group);
1839 	int32_t num_completions;
1840 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
1841 
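	/* The qpair may have been queued for an extra poll by an asynchronous write
	 * completion; it is being polled right now, so drop it from the needs_poll
	 * list to avoid polling it a second time in this iteration.
	 */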
1842 	if (tqpair->needs_poll) {
1843 		TAILQ_REMOVE(&pgroup->needs_poll, tqpair, link);
1844 		tqpair->needs_poll = false;
1845 	}
1846 
1847 	num_completions = spdk_nvme_qpair_process_completions(qpair, pgroup->completions_per_qpair);
1848 
1849 	if (pgroup->num_completions >= 0 && num_completions >= 0) {
1850 		pgroup->num_completions += num_completions;
1851 		pgroup->stats.nvme_completions += num_completions;
1852 	} else {
1853 		pgroup->num_completions = -ENXIO;
1854 	}
1855 }
1856 
1857 static int
1858 nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
1859 {
1860 	struct spdk_nvme_tcp_ic_req *ic_req;
1861 	struct nvme_tcp_pdu *pdu;
1862 	uint32_t timeout_in_sec;
1863 
1864 	pdu = tqpair->send_pdu;
1865 	memset(tqpair->send_pdu, 0, sizeof(*tqpair->send_pdu));
1866 	ic_req = &pdu->hdr.ic_req;
1867 
1868 	ic_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
1869 	ic_req->common.hlen = ic_req->common.plen = sizeof(*ic_req);
1870 	ic_req->pfv = 0;
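	/* MAXR2T in the ICReq is a 0's based value, hence the "- 1". */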
1871 	ic_req->maxr2t = NVME_TCP_MAX_R2T_DEFAULT - 1;
1872 	ic_req->hpda = NVME_TCP_HPDA_DEFAULT;
1873 
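	/* Propose header/data digests based on the controller opts; the target
	 * confirms which digests are actually enabled in its ICResp.
	 */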
1874 	ic_req->dgst.bits.hdgst_enable = tqpair->qpair.ctrlr->opts.header_digest;
1875 	ic_req->dgst.bits.ddgst_enable = tqpair->qpair.ctrlr->opts.data_digest;
1876 
1877 	nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_send_icreq_complete, tqpair);
1878 
1879 	timeout_in_sec = tqpair->qpair.async ? ICREQ_TIMEOUT_ASYNC : ICREQ_TIMEOUT_SYNC;
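	/* Convert the timeout into an absolute TSC deadline: the current tick count
	 * plus seconds scaled by the tick rate.
	 */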
1880 	tqpair->icreq_timeout_tsc = spdk_get_ticks() + (timeout_in_sec * spdk_get_ticks_hz());
1881 	return 0;
1882 }
1883 
1884 static int
1885 nvme_tcp_qpair_connect_sock(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1886 {
1887 	struct sockaddr_storage dst_addr;
1888 	struct sockaddr_storage src_addr;
1889 	int rc;
1890 	struct nvme_tcp_qpair *tqpair;
1891 	int family;
1892 	long int port;
1893 	char *sock_impl_name;
1894 	struct spdk_sock_impl_opts impl_opts = {};
1895 	size_t impl_opts_size = sizeof(impl_opts);
1896 	struct spdk_sock_opts opts;
1897 	struct nvme_tcp_ctrlr *tcp_ctrlr;
1898 
1899 	tqpair = nvme_tcp_qpair(qpair);
1900 
1901 	switch (ctrlr->trid.adrfam) {
1902 	case SPDK_NVMF_ADRFAM_IPV4:
1903 		family = AF_INET;
1904 		break;
1905 	case SPDK_NVMF_ADRFAM_IPV6:
1906 		family = AF_INET6;
1907 		break;
1908 	default:
1909 		SPDK_ERRLOG("Unhandled ADRFAM %d\n", ctrlr->trid.adrfam);
1910 		rc = -1;
1911 		return rc;
1912 	}
1913 
1914 	SPDK_DEBUGLOG(nvme, "adrfam %d ai_family %d\n", ctrlr->trid.adrfam, family);
1915 
1916 	memset(&dst_addr, 0, sizeof(dst_addr));
1917 
1918 	port = spdk_strtol(ctrlr->trid.trsvcid, 10);
1919 	if (port <= 0 || port >= INT_MAX) {
1920 		SPDK_ERRLOG("Invalid port: %s\n", ctrlr->trid.trsvcid);
1921 		rc = -1;
1922 		return rc;
1923 	}
1924 
1925 	SPDK_DEBUGLOG(nvme, "trsvcid is %s\n", ctrlr->trid.trsvcid);
1926 	rc = nvme_tcp_parse_addr(&dst_addr, family, ctrlr->trid.traddr, ctrlr->trid.trsvcid);
1927 	if (rc != 0) {
1928 		SPDK_ERRLOG("dst_addr nvme_tcp_parse_addr() failed\n");
1929 		return rc;
1930 	}
1931 
1932 	if (ctrlr->opts.src_addr[0] || ctrlr->opts.src_svcid[0]) {
1933 		memset(&src_addr, 0, sizeof(src_addr));
1934 		rc = nvme_tcp_parse_addr(&src_addr, family, ctrlr->opts.src_addr, ctrlr->opts.src_svcid);
1935 		if (rc != 0) {
1936 			SPDK_ERRLOG("src_addr nvme_tcp_parse_addr() failed\n");
1937 			return rc;
1938 		}
1939 	}
1940 
1941 	tcp_ctrlr = SPDK_CONTAINEROF(ctrlr, struct nvme_tcp_ctrlr, ctrlr);
1942 	sock_impl_name = tcp_ctrlr->psk[0] ? "ssl" : NULL;
1943 	SPDK_DEBUGLOG(nvme, "sock_impl_name is %s\n", sock_impl_name);
1944 
1945 	if (sock_impl_name) {
1946 		spdk_sock_impl_get_opts(sock_impl_name, &impl_opts, &impl_opts_size);
1947 		impl_opts.tls_version = SPDK_TLS_VERSION_1_3;
1948 		impl_opts.psk_identity = tcp_ctrlr->psk_identity;
1949 		impl_opts.psk_key = tcp_ctrlr->psk;
1950 		impl_opts.psk_key_size = tcp_ctrlr->psk_size;
1951 		impl_opts.tls_cipher_suites = tcp_ctrlr->tls_cipher_suite;
1952 	}
1953 	opts.opts_size = sizeof(opts);
1954 	spdk_sock_get_default_opts(&opts);
1955 	opts.priority = ctrlr->trid.priority;
1956 	opts.zcopy = !nvme_qpair_is_admin_queue(qpair);
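	/* transport_ack_timeout is an exponent, so the effective sock-level ack
	 * timeout grows as 2^n; the value is clamped to
	 * NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT (31) in nvme_tcp_ctrlr_construct().
	 */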
1957 	if (ctrlr->opts.transport_ack_timeout) {
1958 		opts.ack_timeout = 1ULL << ctrlr->opts.transport_ack_timeout;
1959 	}
1960 	if (sock_impl_name) {
1961 		opts.impl_opts = &impl_opts;
1962 		opts.impl_opts_size = sizeof(impl_opts);
1963 	}
1964 	tqpair->sock = spdk_sock_connect_ext(ctrlr->trid.traddr, port, sock_impl_name, &opts);
1965 	if (!tqpair->sock) {
1966 		SPDK_ERRLOG("Failed to connect socket for tqpair=%p, addr=%s, port=%ld\n",
1967 			    tqpair, ctrlr->trid.traddr, port);
1968 		rc = -1;
1969 		return rc;
1970 	}
1971 
1972 	return 0;
1973 }
1974 
1975 static int
1976 nvme_tcp_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1977 {
1978 	struct nvme_tcp_qpair *tqpair;
1979 	int rc;
1980 
1981 	tqpair = nvme_tcp_qpair(qpair);
1982 
1983 	/* Prevent this function from being called recursively, as it could lead to issues with
1984 	 * nvme_fabric_qpair_connect_poll() if the connect response is received in the recursive
1985 	 * call.
1986 	 */
1987 	if (tqpair->flags.in_connect_poll) {
1988 		return -EAGAIN;
1989 	}
1990 
1991 	tqpair->flags.in_connect_poll = 1;
1992 
1993 	switch (tqpair->state) {
1994 	case NVME_TCP_QPAIR_STATE_INVALID:
1995 	case NVME_TCP_QPAIR_STATE_INITIALIZING:
1996 		if (spdk_get_ticks() > tqpair->icreq_timeout_tsc) {
1997 			SPDK_ERRLOG("Failed to receive a valid icresp for tqpair=%p before the timeout\n", tqpair);
1998 			rc = -ETIMEDOUT;
1999 			break;
2000 		}
2001 		rc = -EAGAIN;
2002 		break;
2003 	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND:
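		/* num_entries was set to the queue size minus one, so pass
		 * num_entries + 1 here; per the NVMe-oF spec, SQSIZE in the CONNECT
		 * command is a 0's based field, and the fabric connect path encodes
		 * it accordingly.
		 */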
2004 		rc = nvme_fabric_qpair_connect_async(&tqpair->qpair, tqpair->num_entries + 1);
2005 		if (rc < 0) {
2006 			SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
2007 			break;
2008 		}
2009 		tqpair->state = NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL;
2010 		rc = -EAGAIN;
2011 		break;
2012 	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL:
2013 		rc = nvme_fabric_qpair_connect_poll(&tqpair->qpair);
2014 		if (rc == 0) {
2015 			tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
2016 			nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
2017 		} else if (rc != -EAGAIN) {
2018 			SPDK_ERRLOG("Failed to poll NVMe-oF Fabric CONNECT command\n");
2019 		}
2020 		break;
2021 	case NVME_TCP_QPAIR_STATE_RUNNING:
2022 		rc = 0;
2023 		break;
2024 	default:
2025 		assert(false);
2026 		rc = -EINVAL;
2027 		break;
2028 	}
2029 
2030 	tqpair->flags.in_connect_poll = 0;
2031 	return rc;
2032 }
2033 
2034 static int
2035 nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
2036 {
2037 	int rc = 0;
2038 	struct nvme_tcp_qpair *tqpair;
2039 	struct nvme_tcp_poll_group *tgroup;
2040 
2041 	tqpair = nvme_tcp_qpair(qpair);
2042 
2043 	if (!tqpair->sock) {
2044 		rc = nvme_tcp_qpair_connect_sock(ctrlr, qpair);
2045 		if (rc < 0) {
2046 			return rc;
2047 		}
2048 	}
2049 
2050 	if (qpair->poll_group) {
2051 		rc = nvme_poll_group_connect_qpair(qpair);
2052 		if (rc) {
2053 			SPDK_ERRLOG("Unable to activate the tcp qpair.\n");
2054 			return rc;
2055 		}
2056 		tgroup = nvme_tcp_poll_group(qpair->poll_group);
2057 		tqpair->stats = &tgroup->stats;
2058 		tqpair->shared_stats = true;
2059 	} else {
2060 		/* When resetting a controller, we disconnect the adminq and then reconnect.
2061 		 * The stats are not freed on disconnect, so don't allocate them again
2062 		 * when reconnecting.
2063 		 */
2064 		if (tqpair->stats == NULL) {
2065 			tqpair->stats = calloc(1, sizeof(*tqpair->stats));
2066 			if (!tqpair->stats) {
2067 				SPDK_ERRLOG("tcp stats memory allocation failed\n");
2068 				return -ENOMEM;
2069 			}
2070 		}
2071 	}
2072 
2073 	tqpair->maxr2t = NVME_TCP_MAX_R2T_DEFAULT;
2074 	/* Explicitly set the state and recv_state of tqpair */
2075 	tqpair->state = NVME_TCP_QPAIR_STATE_INVALID;
2076 	if (tqpair->recv_state != NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY) {
2077 		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
2078 	}
2079 	rc = nvme_tcp_qpair_icreq_send(tqpair);
2080 	if (rc != 0) {
2081 		SPDK_ERRLOG("Unable to connect the tqpair\n");
2082 		return rc;
2083 	}
2084 
2085 	return rc;
2086 }
2087 
2088 static struct spdk_nvme_qpair *
2089 nvme_tcp_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr,
2090 			    uint16_t qid, uint32_t qsize,
2091 			    enum spdk_nvme_qprio qprio,
2092 			    uint32_t num_requests, bool async)
2093 {
2094 	struct nvme_tcp_qpair *tqpair;
2095 	struct spdk_nvme_qpair *qpair;
2096 	int rc;
2097 
2098 	if (qsize < SPDK_NVME_QUEUE_MIN_ENTRIES) {
2099 		SPDK_ERRLOG("Failed to create qpair with size %u. Minimum queue size is %d.\n",
2100 			    qsize, SPDK_NVME_QUEUE_MIN_ENTRIES);
2101 		return NULL;
2102 	}
2103 
2104 	tqpair = calloc(1, sizeof(struct nvme_tcp_qpair));
2105 	if (!tqpair) {
2106 		SPDK_ERRLOG("failed to allocate tqpair\n");
2107 		return NULL;
2108 	}
2109 
2110 	/* Set num_entries to one less than the queue size. Per the NVMe and
2111 	 * NVMe-oF specs a queue can never be completely full; one slot shall
2112 	 * always remain empty, so N slots hold at most N - 1 requests.
2113 	 */
2114 	tqpair->num_entries = qsize - 1;
2115 	qpair = &tqpair->qpair;
2116 	rc = nvme_qpair_init(qpair, qid, ctrlr, qprio, num_requests, async);
2117 	if (rc != 0) {
2118 		free(tqpair);
2119 		return NULL;
2120 	}
2121 
2122 	rc = nvme_tcp_alloc_reqs(tqpair);
2123 	if (rc) {
2124 		nvme_tcp_ctrlr_delete_io_qpair(ctrlr, qpair);
2125 		return NULL;
2126 	}
2127 
2128 	/* spdk_nvme_qpair_get_optimal_poll_group() needs socket information,
2129 	 * so create the socket as part of creating the qpair. */
2130 	rc = nvme_tcp_qpair_connect_sock(ctrlr, qpair);
2131 	if (rc) {
2132 		nvme_tcp_ctrlr_delete_io_qpair(ctrlr, qpair);
2133 		return NULL;
2134 	}
2135 
2136 	return qpair;
2137 }
2138 
2139 static struct spdk_nvme_qpair *
2140 nvme_tcp_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
2141 			       const struct spdk_nvme_io_qpair_opts *opts)
2142 {
2143 	return nvme_tcp_ctrlr_create_qpair(ctrlr, qid, opts->io_queue_size, opts->qprio,
2144 					   opts->io_queue_requests, opts->async_mode);
2145 }
2146 
2147 /* We have to use the typedef in the function declaration to appease astyle. */
2148 typedef struct spdk_nvme_ctrlr spdk_nvme_ctrlr_t;
2149 
2150 static int
2151 nvme_tcp_generate_tls_credentials(struct nvme_tcp_ctrlr *tctrlr)
2152 {
2153 	int rc;
2154 	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
2155 	uint8_t psk_configured[SPDK_TLS_PSK_MAX_LEN] = {};
2156 	uint8_t tls_cipher_suite;
2157 	uint8_t psk_retained_hash;
2158 	uint64_t psk_configured_size;
2159 
2160 	assert(tctrlr != NULL);
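	/* Summary of the derivation chain implemented below: parse the Base64
	 * interchange PSK into its binary form, derive a retained PSK from it
	 * (unless the interchange hash says to use it as-is), then derive the TLS
	 * PSK that is handed to the sock layer together with the PSK identity.
	 */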
2161 
2162 	rc = nvme_tcp_parse_interchange_psk(tctrlr->ctrlr.opts.psk, psk_configured, sizeof(psk_configured),
2163 					    &psk_configured_size, &psk_retained_hash);
2164 	if (rc < 0) {
2165 		SPDK_ERRLOG("Failed to parse PSK interchange!\n");
2166 		goto finish;
2167 	}
2168 
2169 	/* The Base64 interchange string decodes to a configured PSK of either 32 or
2170 	 * 48 binary bytes. This check also ensures that psk_configured_size is
2171 	 * smaller than the psk_retained buffer size. */
2172 	if (psk_configured_size == SHA256_DIGEST_LENGTH) {
2173 		tls_cipher_suite = NVME_TCP_CIPHER_AES_128_GCM_SHA256;
2174 		tctrlr->tls_cipher_suite = "TLS_AES_128_GCM_SHA256";
2175 	} else if (psk_configured_size == SHA384_DIGEST_LENGTH) {
2176 		tls_cipher_suite = NVME_TCP_CIPHER_AES_256_GCM_SHA384;
2177 		tctrlr->tls_cipher_suite = "TLS_AES_256_GCM_SHA384";
2178 	} else {
2179 		SPDK_ERRLOG("Unrecognized cipher suite!\n");
2180 		rc = -ENOTSUP;
2181 		goto finish;
2182 	}
2183 
2184 	rc = nvme_tcp_generate_psk_identity(tctrlr->psk_identity, sizeof(tctrlr->psk_identity),
2185 					    tctrlr->ctrlr.opts.hostnqn, tctrlr->ctrlr.trid.subnqn,
2186 					    tls_cipher_suite);
2187 	if (rc) {
2188 		SPDK_ERRLOG("could not generate PSK identity\n");
2189 		goto finish;
2190 	}
2191 
2192 	/* No hash indicates that Configured PSK must be used as Retained PSK. */
2193 	if (psk_retained_hash == NVME_TCP_HASH_ALGORITHM_NONE) {
2194 		assert(psk_configured_size < sizeof(psk_retained));
2195 		memcpy(psk_retained, psk_configured, psk_configured_size);
2196 		rc = psk_configured_size;
2197 	} else {
2198 		/* Derive retained PSK. */
2199 		rc = nvme_tcp_derive_retained_psk(psk_configured, psk_configured_size, tctrlr->ctrlr.opts.hostnqn,
2200 						  psk_retained, sizeof(psk_retained), psk_retained_hash);
2201 		if (rc < 0) {
2202 			SPDK_ERRLOG("Unable to derive retained PSK!\n");
2203 			goto finish;
2204 		}
2205 	}
2206 
2207 	rc = nvme_tcp_derive_tls_psk(psk_retained, rc, tctrlr->psk_identity, tctrlr->psk,
2208 				     sizeof(tctrlr->psk), tls_cipher_suite);
2209 	if (rc < 0) {
2210 		SPDK_ERRLOG("Could not generate TLS PSK!\n");
2211 		return rc;
2212 	}
2213 
2214 	tctrlr->psk_size = rc;
2215 	rc = 0;
2216 
2217 finish:
2218 	spdk_memset_s(psk_configured, sizeof(psk_configured), 0, sizeof(psk_configured));
2219 
2220 	return rc;
2221 }
2222 
2223 static spdk_nvme_ctrlr_t *
2224 nvme_tcp_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
2225 			 const struct spdk_nvme_ctrlr_opts *opts,
2226 			 void *devhandle)
2227 {
2228 	struct nvme_tcp_ctrlr *tctrlr;
2229 	int rc;
2230 
2231 	tctrlr = calloc(1, sizeof(*tctrlr));
2232 	if (tctrlr == NULL) {
2233 		SPDK_ERRLOG("could not allocate ctrlr\n");
2234 		return NULL;
2235 	}
2236 
2237 	tctrlr->ctrlr.opts = *opts;
2238 	tctrlr->ctrlr.trid = *trid;
2239 
2240 	if (opts->psk[0] != '\0') {
2241 		rc = nvme_tcp_generate_tls_credentials(tctrlr);
2242 		spdk_memset_s(&tctrlr->ctrlr.opts.psk, sizeof(tctrlr->ctrlr.opts.psk), 0,
2243 			      sizeof(tctrlr->ctrlr.opts.psk));
2244 
2245 		if (rc != 0) {
2246 			free(tctrlr);
2247 			return NULL;
2248 		}
2249 	}
2250 
2251 	if (opts->transport_ack_timeout > NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT) {
2252 		SPDK_NOTICELOG("transport_ack_timeout exceeds max value %d, use max value\n",
2253 			       NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
2254 		tctrlr->ctrlr.opts.transport_ack_timeout = NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT;
2255 	}
2256 
2257 	rc = nvme_ctrlr_construct(&tctrlr->ctrlr);
2258 	if (rc != 0) {
2259 		free(tctrlr);
2260 		return NULL;
2261 	}
2262 
2263 	tctrlr->ctrlr.adminq = nvme_tcp_ctrlr_create_qpair(&tctrlr->ctrlr, 0,
2264 			       tctrlr->ctrlr.opts.admin_queue_size, 0,
2265 			       tctrlr->ctrlr.opts.admin_queue_size, true);
2266 	if (!tctrlr->ctrlr.adminq) {
2267 		SPDK_ERRLOG("failed to create admin qpair\n");
2268 		nvme_tcp_ctrlr_destruct(&tctrlr->ctrlr);
2269 		return NULL;
2270 	}
2271 
2272 	if (nvme_ctrlr_add_process(&tctrlr->ctrlr, 0) != 0) {
2273 		SPDK_ERRLOG("nvme_ctrlr_add_process() failed\n");
2274 		nvme_ctrlr_destruct(&tctrlr->ctrlr);
2275 		return NULL;
2276 	}
2277 
2278 	return &tctrlr->ctrlr;
2279 }
2280 
2281 static uint32_t
2282 nvme_tcp_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
2283 {
2284 	/* TCP transport doesn't limit maximum IO transfer size. */
2285 	return UINT32_MAX;
2286 }
2287 
2288 static uint16_t
2289 nvme_tcp_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
2290 {
2291 	return NVME_TCP_MAX_SGL_DESCRIPTORS;
2292 }
2293 
2294 static int
2295 nvme_tcp_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
2296 				int (*iter_fn)(struct nvme_request *req, void *arg),
2297 				void *arg)
2298 {
2299 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2300 	struct nvme_tcp_req *tcp_req, *tmp;
2301 	int rc;
2302 
2303 	assert(iter_fn != NULL);
2304 
2305 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
2306 		assert(tcp_req->req != NULL);
2307 
2308 		rc = iter_fn(tcp_req->req, arg);
2309 		if (rc != 0) {
2310 			return rc;
2311 		}
2312 	}
2313 
2314 	return 0;
2315 }
2316 
2317 static void
2318 nvme_tcp_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
2319 {
2320 	struct nvme_tcp_req *tcp_req, *tmp;
2321 	struct spdk_nvme_cpl cpl = {};
2322 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2323 
2324 	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
2325 	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2326 
2327 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
2328 		assert(tcp_req->req != NULL);
2329 		if (tcp_req->req->cmd.opc != SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
2330 			continue;
2331 		}
2332 
2333 		nvme_tcp_req_complete(tcp_req, tqpair, &cpl, false);
2334 	}
2335 }
2336 
2337 static struct spdk_nvme_transport_poll_group *
2338 nvme_tcp_poll_group_create(void)
2339 {
2340 	struct nvme_tcp_poll_group *group = calloc(1, sizeof(*group));
2341 
2342 	if (group == NULL) {
2343 		SPDK_ERRLOG("Unable to allocate poll group.\n");
2344 		return NULL;
2345 	}
2346 
2347 	TAILQ_INIT(&group->needs_poll);
2348 
2349 	group->sock_group = spdk_sock_group_create(group);
2350 	if (group->sock_group == NULL) {
2351 		free(group);
2352 		SPDK_ERRLOG("Unable to allocate sock group.\n");
2353 		return NULL;
2354 	}
2355 
2356 	return &group->group;
2357 }
2358 
2359 static struct spdk_nvme_transport_poll_group *
2360 nvme_tcp_qpair_get_optimal_poll_group(struct spdk_nvme_qpair *qpair)
2361 {
2362 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2363 	struct spdk_sock_group *group = NULL;
2364 	int rc;
2365 
2366 	rc = spdk_sock_get_optimal_sock_group(tqpair->sock, &group, NULL);
2367 	if (!rc && group != NULL) {
2368 		return spdk_sock_group_get_ctx(group);
2369 	}
2370 
2371 	return NULL;
2372 }
2373 
2374 static int
2375 nvme_tcp_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
2376 {
2377 	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(qpair->poll_group);
2378 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2379 
2380 	if (spdk_sock_group_add_sock(group->sock_group, tqpair->sock, nvme_tcp_qpair_sock_cb, qpair)) {
2381 		return -EPROTO;
2382 	}
2383 	return 0;
2384 }
2385 
2386 static int
2387 nvme_tcp_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
2388 {
2389 	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(qpair->poll_group);
2390 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2391 
2392 	if (tqpair->needs_poll) {
2393 		TAILQ_REMOVE(&group->needs_poll, tqpair, link);
2394 		tqpair->needs_poll = false;
2395 	}
2396 
2397 	if (tqpair->sock && group->sock_group) {
2398 		if (spdk_sock_group_remove_sock(group->sock_group, tqpair->sock)) {
2399 			return -EPROTO;
2400 		}
2401 	}
2402 	return 0;
2403 }
2404 
2405 static int
2406 nvme_tcp_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
2407 			struct spdk_nvme_qpair *qpair)
2408 {
2409 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2410 	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(tgroup);
2411 
2412 	/* disconnected qpairs won't have a sock to add. */
2413 	if (nvme_qpair_get_state(qpair) >= NVME_QPAIR_CONNECTED) {
2414 		if (spdk_sock_group_add_sock(group->sock_group, tqpair->sock, nvme_tcp_qpair_sock_cb, qpair)) {
2415 			return -EPROTO;
2416 		}
2417 	}
2418 
2419 	return 0;
2420 }
2421 
2422 static int
2423 nvme_tcp_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
2424 			   struct spdk_nvme_qpair *qpair)
2425 {
2426 	struct nvme_tcp_qpair *tqpair;
2427 	struct nvme_tcp_poll_group *group;
2428 
2429 	assert(qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs);
2430 
2431 	tqpair = nvme_tcp_qpair(qpair);
2432 	group = nvme_tcp_poll_group(tgroup);
2433 
2434 	assert(tqpair->shared_stats == true);
2435 	tqpair->stats = &g_dummy_stats;
2436 
2437 	if (tqpair->needs_poll) {
2438 		TAILQ_REMOVE(&group->needs_poll, tqpair, link);
2439 		tqpair->needs_poll = false;
2440 	}
2441 
2442 	return 0;
2443 }
2444 
2445 static int64_t
2446 nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
2447 					uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
2448 {
2449 	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(tgroup);
2450 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
2451 	struct nvme_tcp_qpair *tqpair, *tmp_tqpair;
2452 	int num_events;
2453 
2454 	group->completions_per_qpair = completions_per_qpair;
2455 	group->num_completions = 0;
2456 	group->stats.polls++;
2457 
2458 	num_events = spdk_sock_group_poll(group->sock_group);
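	/* nvme_tcp_qpair_sock_cb() runs from within this poll for each ready socket
	 * and accumulates per-qpair completions into group->num_completions.
	 */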
2459 
2460 	STAILQ_FOREACH_SAFE(qpair, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_qpair) {
2461 		disconnected_qpair_cb(qpair, tgroup->group->ctx);
2462 	}
2463 
2464 	/* If any qpairs were marked as needing to be polled due to an asynchronous write completion
2465 	 * and they weren't polled as a consequence of calling spdk_sock_group_poll above, poll them now. */
2466 	TAILQ_FOREACH_SAFE(tqpair, &group->needs_poll, link, tmp_tqpair) {
2467 		nvme_tcp_qpair_sock_cb(&tqpair->qpair, group->sock_group, tqpair->sock);
2468 	}
2469 
2470 	if (spdk_unlikely(num_events < 0)) {
2471 		return num_events;
2472 	}
2473 
2474 	group->stats.idle_polls += !num_events;
2475 	group->stats.socket_completions += num_events;
2476 
2477 	return group->num_completions;
2478 }
2479 
2480 static int
2481 nvme_tcp_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
2482 {
2483 	int rc;
2484 	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(tgroup);
2485 
2486 	if (!STAILQ_EMPTY(&tgroup->connected_qpairs) || !STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
2487 		return -EBUSY;
2488 	}
2489 
2490 	rc = spdk_sock_group_close(&group->sock_group);
2491 	if (rc != 0) {
2492 		SPDK_ERRLOG("Failed to close the sock group for a tcp poll group.\n");
2493 		assert(false);
2494 	}
2495 
2496 	free(tgroup);
2497 
2498 	return 0;
2499 }
2500 
2501 static int
2502 nvme_tcp_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
2503 			      struct spdk_nvme_transport_poll_group_stat **_stats)
2504 {
2505 	struct nvme_tcp_poll_group *group;
2506 	struct spdk_nvme_transport_poll_group_stat *stats;
2507 
2508 	if (tgroup == NULL || _stats == NULL) {
2509 		SPDK_ERRLOG("Invalid stats or group pointer\n");
2510 		return -EINVAL;
2511 	}
2512 
2513 	group = nvme_tcp_poll_group(tgroup);
2514 
2515 	stats = calloc(1, sizeof(*stats));
2516 	if (!stats) {
2517 		SPDK_ERRLOG("Can't allocate memory for TCP stats\n");
2518 		return -ENOMEM;
2519 	}
2520 	stats->trtype = SPDK_NVME_TRANSPORT_TCP;
2521 	memcpy(&stats->tcp, &group->stats, sizeof(group->stats));
2522 
2523 	*_stats = stats;
2524 
2525 	return 0;
2526 }
2527 
2528 static void
2529 nvme_tcp_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
2530 			       struct spdk_nvme_transport_poll_group_stat *stats)
2531 {
2532 	free(stats);
2533 }
2534 
2535 const struct spdk_nvme_transport_ops tcp_ops = {
2536 	.name = "TCP",
2537 	.type = SPDK_NVME_TRANSPORT_TCP,
2538 	.ctrlr_construct = nvme_tcp_ctrlr_construct,
2539 	.ctrlr_scan = nvme_fabric_ctrlr_scan,
2540 	.ctrlr_destruct = nvme_tcp_ctrlr_destruct,
2541 	.ctrlr_enable = nvme_tcp_ctrlr_enable,
2542 
2543 	.ctrlr_set_reg_4 = nvme_fabric_ctrlr_set_reg_4,
2544 	.ctrlr_set_reg_8 = nvme_fabric_ctrlr_set_reg_8,
2545 	.ctrlr_get_reg_4 = nvme_fabric_ctrlr_get_reg_4,
2546 	.ctrlr_get_reg_8 = nvme_fabric_ctrlr_get_reg_8,
2547 	.ctrlr_set_reg_4_async = nvme_fabric_ctrlr_set_reg_4_async,
2548 	.ctrlr_set_reg_8_async = nvme_fabric_ctrlr_set_reg_8_async,
2549 	.ctrlr_get_reg_4_async = nvme_fabric_ctrlr_get_reg_4_async,
2550 	.ctrlr_get_reg_8_async = nvme_fabric_ctrlr_get_reg_8_async,
2551 
2552 	.ctrlr_get_max_xfer_size = nvme_tcp_ctrlr_get_max_xfer_size,
2553 	.ctrlr_get_max_sges = nvme_tcp_ctrlr_get_max_sges,
2554 
2555 	.ctrlr_create_io_qpair = nvme_tcp_ctrlr_create_io_qpair,
2556 	.ctrlr_delete_io_qpair = nvme_tcp_ctrlr_delete_io_qpair,
2557 	.ctrlr_connect_qpair = nvme_tcp_ctrlr_connect_qpair,
2558 	.ctrlr_disconnect_qpair = nvme_tcp_ctrlr_disconnect_qpair,
2559 
2560 	.qpair_abort_reqs = nvme_tcp_qpair_abort_reqs,
2561 	.qpair_reset = nvme_tcp_qpair_reset,
2562 	.qpair_submit_request = nvme_tcp_qpair_submit_request,
2563 	.qpair_process_completions = nvme_tcp_qpair_process_completions,
2564 	.qpair_iterate_requests = nvme_tcp_qpair_iterate_requests,
2565 	.admin_qpair_abort_aers = nvme_tcp_admin_qpair_abort_aers,
2566 
2567 	.poll_group_create = nvme_tcp_poll_group_create,
2568 	.qpair_get_optimal_poll_group = nvme_tcp_qpair_get_optimal_poll_group,
2569 	.poll_group_connect_qpair = nvme_tcp_poll_group_connect_qpair,
2570 	.poll_group_disconnect_qpair = nvme_tcp_poll_group_disconnect_qpair,
2571 	.poll_group_add = nvme_tcp_poll_group_add,
2572 	.poll_group_remove = nvme_tcp_poll_group_remove,
2573 	.poll_group_process_completions = nvme_tcp_poll_group_process_completions,
2574 	.poll_group_destroy = nvme_tcp_poll_group_destroy,
2575 	.poll_group_get_stats = nvme_tcp_poll_group_get_stats,
2576 	.poll_group_free_stats = nvme_tcp_poll_group_free_stats,
2577 };
2578 
2579 SPDK_NVME_TRANSPORT_REGISTER(tcp, &tcp_ops);
2580 
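/* A minimal host-side usage sketch (kept out of the build with #if 0): it shows
 * how an application reaches this transport through the public API. The address,
 * port and NQN below are illustrative placeholders, and environment setup
 * (spdk_env_init()) is omitted for brevity.
 */
#if 0
#include "spdk/nvme.h"

static void
example_connect_tcp(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;

	/* Selecting SPDK_NVME_TRANSPORT_TCP routes the connect through tcp_ops. */
	spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_TCP);
	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
	snprintf(trid.traddr, sizeof(trid.traddr), "127.0.0.1");
	snprintf(trid.trsvcid, sizeof(trid.trsvcid), "4420");
	snprintf(trid.subnqn, sizeof(trid.subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN);

	ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	if (ctrlr != NULL) {
		spdk_nvme_detach(ctrlr);
	}
}
#endif
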
2581 SPDK_TRACE_REGISTER_FN(nvme_tcp, "nvme_tcp", TRACE_GROUP_NVME_TCP)
2582 {
2583 	struct spdk_trace_tpoint_opts opts[] = {
2584 		{
2585 			"NVME_TCP_SUBMIT", TRACE_NVME_TCP_SUBMIT,
2586 			OWNER_NVME_TCP_QP, OBJECT_NVME_TCP_REQ, 1,
2587 			{	{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
2588 				{ "cid", SPDK_TRACE_ARG_TYPE_INT, 4 },
2589 				{ "opc", SPDK_TRACE_ARG_TYPE_INT, 4 },
2590 				{ "dw10", SPDK_TRACE_ARG_TYPE_PTR, 4 },
2591 				{ "dw11", SPDK_TRACE_ARG_TYPE_PTR, 4 },
2592 				{ "dw12", SPDK_TRACE_ARG_TYPE_PTR, 4 }
2593 			}
2594 		},
2595 		{
2596 			"NVME_TCP_COMPLETE", TRACE_NVME_TCP_COMPLETE,
2597 			OWNER_NVME_TCP_QP, OBJECT_NVME_TCP_REQ, 0,
2598 			{	{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
2599 				{ "cid", SPDK_TRACE_ARG_TYPE_INT, 4 },
2600 				{ "cpl", SPDK_TRACE_ARG_TYPE_PTR, 4 }
2601 			}
2602 		},
2603 	};
2604 
2605 	spdk_trace_register_object(OBJECT_NVME_TCP_REQ, 'p');
2606 	spdk_trace_register_owner(OWNER_NVME_TCP_QP, 'q');
2607 	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
2608 }
2609