xref: /spdk/lib/nvme/nvme_tcp.c (revision ee32a82bfd3ff5b1a10ed775ee06f0eaffce60eb)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 /*
8  * NVMe/TCP transport
9  */
10 
11 #include "nvme_internal.h"
12 
13 #include "spdk/endian.h"
14 #include "spdk/likely.h"
15 #include "spdk/string.h"
16 #include "spdk/stdinc.h"
17 #include "spdk/crc32.h"
18 #include "spdk/assert.h"
19 #include "spdk/trace.h"
20 #include "spdk/util.h"
21 #include "spdk/nvmf.h"
22 #include "spdk/dma.h"
23 
24 #include "spdk_internal/nvme_tcp.h"
25 #include "spdk_internal/trace_defs.h"
26 
27 #define NVME_TCP_RW_BUFFER_SIZE 131072
28 
29 /* For async connect workloads, allow more time since we are more likely
30  * to be processing lots of ICREQs at once.
31  */
32 #define ICREQ_TIMEOUT_SYNC 2 /* in seconds */
33 #define ICREQ_TIMEOUT_ASYNC 10 /* in seconds */
34 
35 #define NVME_TCP_HPDA_DEFAULT			0
36 #define NVME_TCP_MAX_R2T_DEFAULT		1
37 #define NVME_TCP_PDU_H2C_MIN_DATA_SIZE		4096
38 
39 /*
40  * Maximum value of transport_ack_timeout used by TCP controller
41  */
42 #define NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT	31
43 
44 enum nvme_tcp_qpair_state {
45 	NVME_TCP_QPAIR_STATE_INVALID = 0,
46 	NVME_TCP_QPAIR_STATE_INITIALIZING = 1,
47 	NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND = 2,
48 	NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL = 3,
49 	NVME_TCP_QPAIR_STATE_AUTHENTICATING = 4,
50 	NVME_TCP_QPAIR_STATE_RUNNING = 5,
51 	NVME_TCP_QPAIR_STATE_EXITING = 6,
52 	NVME_TCP_QPAIR_STATE_EXITED = 7,
53 };
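
/* A rough, non-authoritative sketch of the usual progression through these states, pieced
 * together from the handlers below: INVALID -> INITIALIZING (icreq sent, waiting for its ack
 * and/or the icresp) -> FABRIC_CONNECT_SEND -> FABRIC_CONNECT_POLL -> AUTHENTICATING (only if
 * in-band authentication is required) -> RUNNING. A C2H termination request or a disconnect
 * moves the qpair to EXITING and finally EXITED. */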
54 
55 /* NVMe TCP transport extensions for spdk_nvme_ctrlr */
56 struct nvme_tcp_ctrlr {
57 	struct spdk_nvme_ctrlr			ctrlr;
58 	char					psk_identity[NVMF_PSK_IDENTITY_LEN];
59 	uint8_t					psk[SPDK_TLS_PSK_MAX_LEN];
60 	int					psk_size;
61 	char					*tls_cipher_suite;
62 };
63 
64 struct nvme_tcp_poll_group {
65 	struct spdk_nvme_transport_poll_group group;
66 	struct spdk_sock_group *sock_group;
67 	uint32_t completions_per_qpair;
68 	int64_t num_completions;
69 
70 	TAILQ_HEAD(, nvme_tcp_qpair) needs_poll;
71 	struct spdk_nvme_tcp_stat stats;
72 };
73 
74 /* NVMe TCP qpair extensions for spdk_nvme_qpair */
75 struct nvme_tcp_qpair {
76 	struct spdk_nvme_qpair			qpair;
77 	struct spdk_sock			*sock;
78 
79 	TAILQ_HEAD(, nvme_tcp_req)		free_reqs;
80 	TAILQ_HEAD(, nvme_tcp_req)		outstanding_reqs;
81 
82 	TAILQ_HEAD(, nvme_tcp_pdu)		send_queue;
83 	struct nvme_tcp_pdu			*recv_pdu;
84 	struct nvme_tcp_pdu			*send_pdu; /* only for error pdu and init pdu */
85 	struct nvme_tcp_pdu			*send_pdus; /* Used by tcp_reqs */
86 	enum nvme_tcp_pdu_recv_state		recv_state;
87 	struct nvme_tcp_req			*tcp_reqs;
88 	struct spdk_nvme_tcp_stat		*stats;
89 
90 	uint16_t				num_entries;
91 	uint16_t				async_complete;
92 
93 	struct {
94 		uint16_t host_hdgst_enable: 1;
95 		uint16_t host_ddgst_enable: 1;
96 		uint16_t icreq_send_ack: 1;
97 		uint16_t in_connect_poll: 1;
98 		uint16_t reserved: 12;
99 	} flags;
100 
101 	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
102 	uint32_t				maxh2cdata;
103 
104 	uint32_t				maxr2t;
105 
106 	/* 0-based value used to guide the padding */
107 	uint8_t					cpda;
108 
109 	enum nvme_tcp_qpair_state		state;
110 
111 	TAILQ_ENTRY(nvme_tcp_qpair)		link;
112 	bool					needs_poll;
113 
114 	uint64_t				icreq_timeout_tsc;
115 
116 	bool					shared_stats;
117 };
118 
119 enum nvme_tcp_req_state {
120 	NVME_TCP_REQ_FREE,
121 	NVME_TCP_REQ_ACTIVE,
122 	NVME_TCP_REQ_ACTIVE_R2T,
123 };
124 
125 struct nvme_tcp_req {
126 	struct nvme_request			*req;
127 	enum nvme_tcp_req_state			state;
128 	uint16_t				cid;
129 	uint16_t				ttag;
130 	uint32_t				datao;
131 	uint32_t				expected_datao;
132 	uint32_t				r2tl_remain;
133 	uint32_t				active_r2ts;
134 	/* Used to hold a value received from a subsequent R2T while we are still
135 	 * waiting for the H2C to complete */
136 	uint16_t				ttag_r2t_next;
137 	bool					in_capsule_data;
138 	/* It is used to track whether the req can be safely freed */
139 	union {
140 		uint8_t raw;
141 		struct {
142 			/* The last send operation completed - kernel released send buffer */
143 			uint8_t				send_ack : 1;
144 			/* Data transfer completed - the target sent a response or the last data bit */
145 			uint8_t				data_recv : 1;
146 			/* tcp_req is waiting for completion of the previous send operation (buffer reclaim notification
147 			 * from kernel) to send H2C */
148 			uint8_t				h2c_send_waiting_ack : 1;
149 			/* tcp_req received a subsequent r2t while it is still waiting for send_ack.
150 			 * This is a rare case that only occurs with targets that send several R2T requests;
151 			 * the SPDK TCP target sends a single R2T for the whole data buffer */
152 			uint8_t				r2t_waiting_h2c_complete : 1;
153 			/* Accel operation is in progress */
154 			uint8_t				in_progress_accel : 1;
155 			uint8_t				domain_in_use: 1;
156 			uint8_t				reserved : 2;
157 		} bits;
158 	} ordering;
159 	struct nvme_tcp_pdu			*pdu;
160 	struct iovec				iov[NVME_TCP_MAX_SGL_DESCRIPTORS];
161 	uint32_t				iovcnt;
162 	/* Used to hold a value received from a subsequent R2T while we are still
163 	 * waiting for the H2C ack */
164 	uint32_t				r2tl_remain_next;
165 	struct nvme_tcp_qpair			*tqpair;
166 	TAILQ_ENTRY(nvme_tcp_req)		link;
167 	struct spdk_nvme_cpl			rsp;
168 	uint8_t					rsvd1[32];
169 };
170 SPDK_STATIC_ASSERT(sizeof(struct nvme_tcp_req) % SPDK_CACHE_LINE_SIZE == 0, "unaligned size");
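
/* Illustrative note (restating the code rather than adding a new contract): the ordering bits
 * above gate when a request may be completed and recycled. nvme_tcp_req_complete_safe() only
 * completes a request once send_ack and data_recv are both set and no accel operation is in
 * progress; e.g. for a read, the capsule-send callback sets send_ack while the CapsuleResp/C2H
 * data handler sets data_recv, and the two may arrive in either order. */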
171 
172 static struct spdk_nvme_tcp_stat g_dummy_stats = {};
173 
174 static void nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req);
175 static int64_t nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group
176 		*tgroup, uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
177 static void nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu);
178 static void nvme_tcp_req_complete(struct nvme_tcp_req *tcp_req, struct nvme_tcp_qpair *tqpair,
179 				  struct spdk_nvme_cpl *rsp, bool print_on_error);
180 
181 static inline struct nvme_tcp_qpair *
182 nvme_tcp_qpair(struct spdk_nvme_qpair *qpair)
183 {
184 	assert(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
185 	return SPDK_CONTAINEROF(qpair, struct nvme_tcp_qpair, qpair);
186 }
187 
188 static inline struct nvme_tcp_poll_group *
189 nvme_tcp_poll_group(struct spdk_nvme_transport_poll_group *group)
190 {
191 	return SPDK_CONTAINEROF(group, struct nvme_tcp_poll_group, group);
192 }
193 
194 static inline struct nvme_tcp_ctrlr *
195 nvme_tcp_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
196 {
197 	assert(ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_TCP);
198 	return SPDK_CONTAINEROF(ctrlr, struct nvme_tcp_ctrlr, ctrlr);
199 }
200 
201 static struct nvme_tcp_req *
202 nvme_tcp_req_get(struct nvme_tcp_qpair *tqpair)
203 {
204 	struct nvme_tcp_req *tcp_req;
205 
206 	tcp_req = TAILQ_FIRST(&tqpair->free_reqs);
207 	if (!tcp_req) {
208 		return NULL;
209 	}
210 
211 	assert(tcp_req->state == NVME_TCP_REQ_FREE);
212 	tcp_req->state = NVME_TCP_REQ_ACTIVE;
213 	TAILQ_REMOVE(&tqpair->free_reqs, tcp_req, link);
214 	tcp_req->datao = 0;
215 	tcp_req->expected_datao = 0;
216 	tcp_req->req = NULL;
217 	tcp_req->in_capsule_data = false;
218 	tcp_req->r2tl_remain = 0;
219 	tcp_req->r2tl_remain_next = 0;
220 	tcp_req->active_r2ts = 0;
221 	tcp_req->iovcnt = 0;
222 	tcp_req->ordering.raw = 0;
223 	memset(tcp_req->pdu, 0, sizeof(struct nvme_tcp_pdu));
224 	memset(&tcp_req->rsp, 0, sizeof(struct spdk_nvme_cpl));
225 
226 	return tcp_req;
227 }
228 
229 static void
230 nvme_tcp_req_put(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
231 {
232 	assert(tcp_req->state != NVME_TCP_REQ_FREE);
233 	tcp_req->state = NVME_TCP_REQ_FREE;
234 	TAILQ_INSERT_HEAD(&tqpair->free_reqs, tcp_req, link);
235 }
236 
237 static inline void
238 nvme_tcp_accel_submit_crc32c(struct nvme_tcp_poll_group *tgroup, struct nvme_tcp_req *treq,
239 			     uint32_t *dst, struct iovec *iovs, uint32_t iovcnt, uint32_t seed,
240 			     spdk_nvme_accel_completion_cb cb_fn, void *cb_arg)
241 {
242 	struct spdk_nvme_poll_group *pg = tgroup->group.group;
243 
244 	treq->ordering.bits.in_progress_accel = 1;
245 	pg->accel_fn_table.submit_accel_crc32c(pg->ctx, dst, iovs, iovcnt, seed, cb_fn, cb_arg);
246 }
247 
248 static inline void
249 nvme_tcp_accel_finish_sequence(struct nvme_tcp_poll_group *tgroup, struct nvme_tcp_req *treq,
250 			       void *seq, spdk_nvme_accel_completion_cb cb_fn, void *cb_arg)
251 {
252 	struct spdk_nvme_poll_group *pg = tgroup->group.group;
253 
254 	treq->ordering.bits.in_progress_accel = 1;
255 	pg->accel_fn_table.finish_sequence(seq, cb_fn, cb_arg);
256 }
257 
258 static inline void
259 nvme_tcp_accel_reverse_sequence(struct nvme_tcp_poll_group *tgroup, void *seq)
260 {
261 	struct spdk_nvme_poll_group *pg = tgroup->group.group;
262 
263 	pg->accel_fn_table.reverse_sequence(seq);
264 }
265 
266 static inline int
267 nvme_tcp_accel_append_crc32c(struct nvme_tcp_poll_group *tgroup, void **seq, uint32_t *dst,
268 			     struct iovec *iovs, uint32_t iovcnt, uint32_t seed,
269 			     spdk_nvme_accel_step_cb cb_fn, void *cb_arg)
270 {
271 	struct spdk_nvme_poll_group *pg = tgroup->group.group;
272 
273 	return pg->accel_fn_table.append_crc32c(pg->ctx, seq, dst, iovs, iovcnt, NULL, NULL,
274 						seed, cb_fn, cb_arg);
275 }
276 
277 static void
278 nvme_tcp_free_reqs(struct nvme_tcp_qpair *tqpair)
279 {
280 	free(tqpair->tcp_reqs);
281 	tqpair->tcp_reqs = NULL;
282 
283 	spdk_free(tqpair->send_pdus);
284 	tqpair->send_pdus = NULL;
285 }
286 
287 static int
288 nvme_tcp_alloc_reqs(struct nvme_tcp_qpair *tqpair)
289 {
290 	uint16_t i;
291 	struct nvme_tcp_req *tcp_req;
292 
293 	tqpair->tcp_reqs = aligned_alloc(SPDK_CACHE_LINE_SIZE,
294 					 tqpair->num_entries * sizeof(*tcp_req));
295 	if (tqpair->tcp_reqs == NULL) {
296 		SPDK_ERRLOG("Failed to allocate tcp_reqs on tqpair=%p\n", tqpair);
297 		goto fail;
298 	}
299 
300 	/* Allocate 2 additional entries for the send_pdu and recv_pdu owned by the tqpair */
301 	tqpair->send_pdus = spdk_zmalloc((tqpair->num_entries + 2) * sizeof(struct nvme_tcp_pdu),
302 					 0x1000, NULL,
303 					 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
304 
305 	if (tqpair->send_pdus == NULL) {
306 		SPDK_ERRLOG("Failed to allocate send_pdus on tqpair=%p\n", tqpair);
307 		goto fail;
308 	}
309 
310 	memset(tqpair->tcp_reqs, 0, tqpair->num_entries * sizeof(*tcp_req));
311 	TAILQ_INIT(&tqpair->send_queue);
312 	TAILQ_INIT(&tqpair->free_reqs);
313 	TAILQ_INIT(&tqpair->outstanding_reqs);
314 	tqpair->qpair.queue_depth = 0;
315 	for (i = 0; i < tqpair->num_entries; i++) {
316 		tcp_req = &tqpair->tcp_reqs[i];
317 		tcp_req->cid = i;
318 		tcp_req->tqpair = tqpair;
319 		tcp_req->pdu = &tqpair->send_pdus[i];
320 		TAILQ_INSERT_TAIL(&tqpair->free_reqs, tcp_req, link);
321 	}
322 
323 	tqpair->send_pdu = &tqpair->send_pdus[i];
324 	tqpair->recv_pdu = &tqpair->send_pdus[i + 1];
325 
326 	return 0;
327 fail:
328 	nvme_tcp_free_reqs(tqpair);
329 	return -ENOMEM;
330 }
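
/* For reference, the send_pdus array allocated above ends up laid out as follows
 * (derived directly from the loop above):
 *
 *   send_pdus[0 .. num_entries - 1]  one PDU per tcp_req (tcp_req->pdu)
 *   send_pdus[num_entries]           tqpair->send_pdu (error and init PDUs)
 *   send_pdus[num_entries + 1]       tqpair->recv_pdu
 */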
331 
332 static inline void
333 nvme_tcp_qpair_set_recv_state(struct nvme_tcp_qpair *tqpair,
334 			      enum nvme_tcp_pdu_recv_state state)
335 {
336 	if (tqpair->recv_state == state) {
337 		SPDK_ERRLOG("The recv state of tqpair=%p is already the same as the state (%d) to be set\n",
338 			    tqpair, state);
339 		return;
340 	}
341 
342 	if (state == NVME_TCP_PDU_RECV_STATE_ERROR) {
343 		assert(TAILQ_EMPTY(&tqpair->outstanding_reqs));
344 	}
345 
346 	tqpair->recv_state = state;
347 }
348 
349 static void nvme_tcp_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
350 
351 static void
352 nvme_tcp_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
353 {
354 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
355 	struct nvme_tcp_pdu *pdu;
356 	int rc;
357 	struct nvme_tcp_poll_group *group;
358 
359 	if (tqpair->needs_poll) {
360 		group = nvme_tcp_poll_group(qpair->poll_group);
361 		TAILQ_REMOVE(&group->needs_poll, tqpair, link);
362 		tqpair->needs_poll = false;
363 	}
364 
365 	rc = spdk_sock_close(&tqpair->sock);
366 
367 	if (tqpair->sock != NULL) {
368 		SPDK_ERRLOG("tqpair=%p, errno=%d, rc=%d\n", tqpair, errno, rc);
369 		/* Set it to NULL manually */
370 		tqpair->sock = NULL;
371 	}
372 
373 	/* clear the send_queue */
374 	while (!TAILQ_EMPTY(&tqpair->send_queue)) {
375 		pdu = TAILQ_FIRST(&tqpair->send_queue);
376 		/* Remove the pdu from the send_queue to prevent it from being erroneously
377 		 * sent out on the next connection attempt
378 		 */
379 		TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
380 	}
381 
382 	nvme_tcp_qpair_abort_reqs(qpair, qpair->abort_dnr);
383 
384 	/* If the qpair is marked as asynchronous, let it go through the process_completions() to
385 	 * let any outstanding requests (e.g. those with outstanding accel operations) complete.
386 	 * Otherwise, there's no way of waiting for them, so tqpair->outstanding_reqs has to be
387 	 * empty.
388 	 */
389 	if (qpair->async) {
390 		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
391 	} else {
392 		assert(TAILQ_EMPTY(&tqpair->outstanding_reqs));
393 		nvme_transport_ctrlr_disconnect_qpair_done(qpair);
394 	}
395 }
396 
397 static int
398 nvme_tcp_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
399 {
400 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
401 
402 	assert(qpair != NULL);
403 	nvme_tcp_qpair_abort_reqs(qpair, qpair->abort_dnr);
404 	assert(TAILQ_EMPTY(&tqpair->outstanding_reqs));
405 
406 	nvme_qpair_deinit(qpair);
407 	nvme_tcp_free_reqs(tqpair);
408 	if (!tqpair->shared_stats) {
409 		free(tqpair->stats);
410 	}
411 	free(tqpair);
412 
413 	return 0;
414 }
415 
416 static int
417 nvme_tcp_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
418 {
419 	return 0;
420 }
421 
422 static int
423 nvme_tcp_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
424 {
425 	struct nvme_tcp_ctrlr *tctrlr = nvme_tcp_ctrlr(ctrlr);
426 
427 	if (ctrlr->adminq) {
428 		nvme_tcp_ctrlr_delete_io_qpair(ctrlr, ctrlr->adminq);
429 	}
430 
431 	nvme_ctrlr_destruct_finish(ctrlr);
432 
433 	free(tctrlr);
434 
435 	return 0;
436 }
437 
438 /* If there are queued requests, we assume they are queued because they are waiting
439  * for resources to be released. Those resources are almost certainly released in
440  * response to a PDU completing. However, to attempt to make forward progress
441  * the qpair needs to be polled and we can't rely on another network event to make
442  * that happen. Add it to a list of qpairs to poll regardless of network activity.
443  *
444  * Additionally, when the tqpair state is NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL or
445  * NVME_TCP_QPAIR_STATE_INITIALIZING, it also needs to be added to the needs_poll list to
446  * make forward progress in case the resources are released after the icreq's or CONNECT's
447  * response is processed. */
448 static void
449 nvme_tcp_cond_schedule_qpair_polling(struct nvme_tcp_qpair *tqpair)
450 {
451 	struct nvme_tcp_poll_group *pgroup;
452 
453 	if (tqpair->needs_poll || !tqpair->qpair.poll_group) {
454 		return;
455 	}
456 
457 	if (STAILQ_EMPTY(&tqpair->qpair.queued_req) &&
458 	    spdk_likely(tqpair->state != NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL &&
459 			tqpair->state != NVME_TCP_QPAIR_STATE_INITIALIZING)) {
460 		return;
461 	}
462 
463 	pgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
464 	TAILQ_INSERT_TAIL(&pgroup->needs_poll, tqpair, link);
465 	tqpair->needs_poll = true;
466 }
467 
468 static void
469 pdu_write_done(void *cb_arg, int err)
470 {
471 	struct nvme_tcp_pdu *pdu = cb_arg;
472 	struct nvme_tcp_qpair *tqpair = pdu->qpair;
473 
474 	nvme_tcp_cond_schedule_qpair_polling(tqpair);
475 	TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
476 
477 	if (err != 0) {
478 		nvme_transport_ctrlr_disconnect_qpair(tqpair->qpair.ctrlr, &tqpair->qpair);
479 		return;
480 	}
481 
482 	assert(pdu->cb_fn != NULL);
483 	pdu->cb_fn(pdu->cb_arg);
484 }
485 
486 static void
487 pdu_write_fail(struct nvme_tcp_pdu *pdu, int status)
488 {
489 	struct nvme_tcp_qpair *tqpair = pdu->qpair;
490 
491 	/* This function is similar to pdu_write_done(), but it should be called before a PDU is
492 	 * sent over the socket */
493 	TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
494 	pdu_write_done(pdu, status);
495 }
496 
497 static void
498 pdu_seq_fail(struct nvme_tcp_pdu *pdu, int status)
499 {
500 	struct nvme_tcp_req *treq = pdu->req;
501 
502 	SPDK_ERRLOG("Failed to execute accel sequence: %d\n", status);
503 	nvme_tcp_cond_schedule_qpair_polling(pdu->qpair);
504 	treq->rsp.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
505 	nvme_tcp_req_complete(treq, treq->tqpair, &treq->rsp, true);
506 }
507 
508 static void
509 _tcp_write_pdu(struct nvme_tcp_pdu *pdu)
510 {
511 	uint32_t mapped_length = 0;
512 	struct nvme_tcp_qpair *tqpair = pdu->qpair;
513 
514 	pdu->sock_req.iovcnt = nvme_tcp_build_iovs(pdu->iov, SPDK_COUNTOF(pdu->iov), pdu,
515 			       (bool)tqpair->flags.host_hdgst_enable, (bool)tqpair->flags.host_ddgst_enable,
516 			       &mapped_length);
517 	TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
518 	if (spdk_unlikely(mapped_length < pdu->data_len)) {
519 		SPDK_ERRLOG("could not map the whole %u bytes (mapped only %u bytes)\n", pdu->data_len,
520 			    mapped_length);
521 		pdu_write_done(pdu, -EINVAL);
522 		return;
523 	}
524 	pdu->sock_req.cb_fn = pdu_write_done;
525 	pdu->sock_req.cb_arg = pdu;
526 	tqpair->stats->submitted_requests++;
527 	spdk_sock_writev_async(tqpair->sock, &pdu->sock_req);
528 }
529 
530 static void
531 tcp_write_pdu_seq_cb(void *ctx, int status)
532 {
533 	struct nvme_tcp_pdu *pdu = ctx;
534 	struct nvme_tcp_req *treq = pdu->req;
535 	struct nvme_request *req = treq->req;
536 
537 	assert(treq->ordering.bits.in_progress_accel);
538 	treq->ordering.bits.in_progress_accel = 0;
539 
540 	req->accel_sequence = NULL;
541 	if (spdk_unlikely(status != 0)) {
542 		pdu_seq_fail(pdu, status);
543 		return;
544 	}
545 
546 	_tcp_write_pdu(pdu);
547 }
548 
549 static void
550 tcp_write_pdu(struct nvme_tcp_pdu *pdu)
551 {
552 	struct nvme_tcp_req *treq = pdu->req;
553 	struct nvme_tcp_qpair *tqpair = pdu->qpair;
554 	struct nvme_tcp_poll_group *tgroup;
555 	struct nvme_request *req;
556 
557 	if (spdk_likely(treq != NULL)) {
558 		req = treq->req;
559 		if (req->accel_sequence != NULL &&
560 		    spdk_nvme_opc_get_data_transfer(req->cmd.opc) == SPDK_NVME_DATA_HOST_TO_CONTROLLER &&
561 		    pdu->data_len > 0) {
562 			assert(tqpair->qpair.poll_group != NULL);
563 			tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
564 			nvme_tcp_accel_finish_sequence(tgroup, treq, req->accel_sequence,
565 						       tcp_write_pdu_seq_cb, pdu);
566 			return;
567 		}
568 	}
569 
570 	_tcp_write_pdu(pdu);
571 }
572 
573 static void
574 pdu_accel_compute_crc32_done(void *cb_arg, int status)
575 {
576 	struct nvme_tcp_pdu *pdu = cb_arg;
577 	struct nvme_tcp_req *req = pdu->req;
578 
579 	assert(req->ordering.bits.in_progress_accel);
580 	req->ordering.bits.in_progress_accel = 0;
581 
582 	if (spdk_unlikely(status)) {
583 		SPDK_ERRLOG("Failed to compute the data digest for pdu=%p\n", pdu);
584 		pdu_write_fail(pdu, status);
585 		return;
586 	}
587 
588 	pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
589 	MAKE_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);
590 
591 	_tcp_write_pdu(pdu);
592 }
593 
594 static void
595 pdu_accel_compute_crc32_seq_cb(void *cb_arg, int status)
596 {
597 	struct nvme_tcp_pdu *pdu = cb_arg;
598 	struct nvme_tcp_qpair *tqpair = pdu->qpair;
599 	struct nvme_tcp_poll_group *tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
600 	struct nvme_tcp_req *treq = pdu->req;
601 	struct nvme_request *req = treq->req;
602 
603 	assert(treq->ordering.bits.in_progress_accel);
604 	treq->ordering.bits.in_progress_accel = 0;
605 
606 	req->accel_sequence = NULL;
607 	if (spdk_unlikely(status != 0)) {
608 		pdu_seq_fail(pdu, status);
609 		return;
610 	}
611 
612 	nvme_tcp_accel_submit_crc32c(tgroup, pdu->req, &pdu->data_digest_crc32,
613 				     pdu->data_iov, pdu->data_iovcnt, 0,
614 				     pdu_accel_compute_crc32_done, pdu);
615 }
616 
617 static void
618 pdu_accel_seq_compute_crc32_done(void *cb_arg)
619 {
620 	struct nvme_tcp_pdu *pdu = cb_arg;
621 
622 	pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
623 	MAKE_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);
624 }
625 
626 static bool
627 pdu_accel_compute_crc32(struct nvme_tcp_pdu *pdu)
628 {
629 	struct nvme_tcp_qpair *tqpair = pdu->qpair;
630 	struct nvme_tcp_poll_group *tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
631 	struct nvme_request *req = ((struct nvme_tcp_req *)pdu->req)->req;
632 	int rc;
633 
634 	/* For now, only support this limited case */
635 	if (spdk_unlikely(nvme_qpair_get_state(&tqpair->qpair) < NVME_QPAIR_CONNECTED ||
636 			  pdu->dif_ctx != NULL ||
637 			  pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT != 0)) {
638 		return false;
639 	}
640 
641 	if (tqpair->qpair.poll_group == NULL) {
642 		return false;
643 	}
644 
645 	if (tgroup->group.group->accel_fn_table.append_crc32c != NULL) {
646 		rc = nvme_tcp_accel_append_crc32c(tgroup, &req->accel_sequence,
647 						  &pdu->data_digest_crc32,
648 						  pdu->data_iov, pdu->data_iovcnt, 0,
649 						  pdu_accel_seq_compute_crc32_done, pdu);
650 		if (spdk_unlikely(rc != 0)) {
651 			/* If accel is out of resources, fall back to non-accelerated crc32 */
652 			if (rc == -ENOMEM) {
653 				return false;
654 			}
655 
656 			SPDK_ERRLOG("Failed to append crc32c operation: %d\n", rc);
657 			pdu_write_fail(pdu, rc);
658 			return true;
659 		}
660 
661 		tcp_write_pdu(pdu);
662 		return true;
663 	} else if (tgroup->group.group->accel_fn_table.submit_accel_crc32c != NULL) {
664 		if (req->accel_sequence != NULL) {
665 			nvme_tcp_accel_finish_sequence(tgroup, pdu->req, req->accel_sequence,
666 						       pdu_accel_compute_crc32_seq_cb, pdu);
667 		} else {
668 			nvme_tcp_accel_submit_crc32c(tgroup, pdu->req, &pdu->data_digest_crc32,
669 						     pdu->data_iov, pdu->data_iovcnt, 0,
670 						     pdu_accel_compute_crc32_done, pdu);
671 		}
672 
673 		return true;
674 	}
675 
676 	return false;
677 }
678 
679 static void
680 pdu_compute_crc32_seq_cb(void *cb_arg, int status)
681 {
682 	struct nvme_tcp_pdu *pdu = cb_arg;
683 	struct nvme_tcp_req *treq = pdu->req;
684 	struct nvme_request *req = treq->req;
685 	uint32_t crc32c;
686 
687 	assert(treq->ordering.bits.in_progress_accel);
688 	treq->ordering.bits.in_progress_accel = 0;
689 
690 	req->accel_sequence = NULL;
691 	if (spdk_unlikely(status != 0)) {
692 		pdu_seq_fail(pdu, status);
693 		return;
694 	}
695 
696 	crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
697 	crc32c = crc32c ^ SPDK_CRC32C_XOR;
698 	MAKE_DIGEST_WORD(pdu->data_digest, crc32c);
699 
700 	_tcp_write_pdu(pdu);
701 }
702 
703 static void
704 pdu_compute_crc32(struct nvme_tcp_pdu *pdu)
705 {
706 	struct nvme_tcp_qpair *tqpair = pdu->qpair;
707 	struct nvme_tcp_poll_group *tgroup;
708 	struct nvme_request *req;
709 	uint32_t crc32c;
710 
711 	/* Data Digest */
712 	if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] &&
713 	    tqpair->flags.host_ddgst_enable) {
714 		if (pdu_accel_compute_crc32(pdu)) {
715 			return;
716 		}
717 
718 		req = ((struct nvme_tcp_req *)pdu->req)->req;
719 		if (req->accel_sequence != NULL) {
720 			tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
721 			nvme_tcp_accel_finish_sequence(tgroup, pdu->req, req->accel_sequence,
722 						       pdu_compute_crc32_seq_cb, pdu);
723 			return;
724 		}
725 
726 		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
727 		crc32c = crc32c ^ SPDK_CRC32C_XOR;
728 		MAKE_DIGEST_WORD(pdu->data_digest, crc32c);
729 	}
730 
731 	tcp_write_pdu(pdu);
732 }
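
/* Summary of the data-digest paths above (restating the code for clarity): when the payload
 * qualifies, pdu_accel_compute_crc32() offloads the CRC either by appending a crc32c step to
 * the request's accel sequence or by submitting a standalone accel crc32c operation. If
 * neither offload path is available, any pending accel sequence is finished first and the
 * digest is computed in software via nvme_tcp_pdu_calc_data_digest() before the PDU is
 * written out. */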
733 
734 static int
735 nvme_tcp_qpair_write_pdu(struct nvme_tcp_qpair *tqpair,
736 			 struct nvme_tcp_pdu *pdu,
737 			 nvme_tcp_qpair_xfer_complete_cb cb_fn,
738 			 void *cb_arg)
739 {
740 	int hlen;
741 	uint32_t crc32c;
742 
743 	hlen = pdu->hdr.common.hlen;
744 	pdu->cb_fn = cb_fn;
745 	pdu->cb_arg = cb_arg;
746 	pdu->qpair = tqpair;
747 
748 	/* Header Digest */
749 	if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && tqpair->flags.host_hdgst_enable) {
750 		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
751 		MAKE_DIGEST_WORD((uint8_t *)&pdu->hdr.raw[hlen], crc32c);
752 	}
753 
754 	pdu_compute_crc32(pdu);
755 
756 	return 0;
757 }
758 
759 static int
760 nvme_tcp_try_memory_translation(struct nvme_tcp_req *tcp_req, void **addr, uint32_t length)
761 {
762 	struct nvme_request *req = tcp_req->req;
763 	struct spdk_memory_domain_translation_result translation = {
764 		.iov_count = 0,
765 		.size = sizeof(translation)
766 	};
767 	int rc;
768 
769 	if (!tcp_req->ordering.bits.domain_in_use) {
770 		return 0;
771 	}
772 
773 	rc = spdk_memory_domain_translate_data(req->payload.opts->memory_domain,
774 					       req->payload.opts->memory_domain_ctx, spdk_memory_domain_get_system_domain(), NULL, *addr, length,
775 					       &translation);
776 	if (spdk_unlikely(rc || translation.iov_count != 1)) {
777 		SPDK_ERRLOG("DMA memory translation failed, rc %d, iov_count %u\n", rc, translation.iov_count);
778 		return -EFAULT;
779 	}
780 
781 	assert(length == translation.iov.iov_len);
782 	*addr = translation.iov.iov_base;
783 	return 0;
784 }
785 
786 /*
787  * Build SGL describing contiguous payload buffer.
788  */
789 static int
790 nvme_tcp_build_contig_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
791 {
792 	struct nvme_request *req = tcp_req->req;
793 
794 	/* ubsan complains about applying zero offset to null pointer if contig_or_cb_arg is NULL,
795 	 * so just double cast it to make it go away */
796 	void *addr = (void *)((uintptr_t)req->payload.contig_or_cb_arg + req->payload_offset);
797 	size_t length = req->payload_size;
798 	int rc;
799 
800 	SPDK_DEBUGLOG(nvme, "enter\n");
801 
802 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
803 	rc = nvme_tcp_try_memory_translation(tcp_req, &addr, length);
804 	if (spdk_unlikely(rc)) {
805 		return rc;
806 	}
807 
808 	tcp_req->iov[0].iov_base = addr;
809 	tcp_req->iov[0].iov_len = length;
810 	tcp_req->iovcnt = 1;
811 	return 0;
812 }
813 
814 /*
815  * Build SGL describing scattered payload buffer.
816  */
817 static int
818 nvme_tcp_build_sgl_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
819 {
820 	int rc;
821 	uint32_t length, remaining_size, iovcnt = 0, max_num_sgl;
822 	struct nvme_request *req = tcp_req->req;
823 
824 	SPDK_DEBUGLOG(nvme, "enter\n");
825 
826 	assert(req->payload_size != 0);
827 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
828 	assert(req->payload.reset_sgl_fn != NULL);
829 	assert(req->payload.next_sge_fn != NULL);
830 	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
831 
832 	max_num_sgl = spdk_min(req->qpair->ctrlr->max_sges, NVME_TCP_MAX_SGL_DESCRIPTORS);
833 	remaining_size = req->payload_size;
834 
835 	do {
836 		void *addr;
837 
838 		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &addr, &length);
839 		if (rc) {
840 			return -1;
841 		}
842 
843 		rc = nvme_tcp_try_memory_translation(tcp_req, &addr, length);
844 		if (spdk_unlikely(rc)) {
845 			return rc;
846 		}
847 
848 		length = spdk_min(length, remaining_size);
849 		tcp_req->iov[iovcnt].iov_base = addr;
850 		tcp_req->iov[iovcnt].iov_len = length;
851 		remaining_size -= length;
852 		iovcnt++;
853 	} while (remaining_size > 0 && iovcnt < max_num_sgl);
854 
855 
856 	/* Should be impossible if we did our sgl checks properly up the stack, but do a sanity check here. */
857 	if (remaining_size > 0) {
858 		SPDK_ERRLOG("Failed to construct tcp_req=%p, and the iovcnt=%u, remaining_size=%u\n",
859 			    tcp_req, iovcnt, remaining_size);
860 		return -1;
861 	}
862 
863 	tcp_req->iovcnt = iovcnt;
864 
865 	return 0;
866 }
867 
868 static int
869 nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
870 		  struct nvme_tcp_req *tcp_req)
871 {
872 	struct spdk_nvme_ctrlr *ctrlr = tqpair->qpair.ctrlr;
873 	int rc = 0;
874 	enum spdk_nvme_data_transfer xfer;
875 	uint32_t max_in_capsule_data_size;
876 
877 	tcp_req->req = req;
878 	tcp_req->ordering.bits.domain_in_use = (req->payload.opts && req->payload.opts->memory_domain);
879 
880 	req->cmd.cid = tcp_req->cid;
881 	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
882 	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
883 	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
884 	req->cmd.dptr.sgl1.unkeyed.length = req->payload_size;
885 
886 	if (spdk_unlikely(req->cmd.opc == SPDK_NVME_OPC_FABRIC)) {
887 		struct spdk_nvmf_capsule_cmd *nvmf_cmd = (struct spdk_nvmf_capsule_cmd *)&req->cmd;
888 
889 		xfer = spdk_nvme_opc_get_data_transfer(nvmf_cmd->fctype);
890 	} else {
891 		xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
892 	}
893 
894 	/* For c2h, delay filling in the iov until the data arrives.
895 	 * For h2c, some delay is also possible if the data doesn't fit into the cmd capsule (not implemented). */
896 	if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG) {
897 		if (xfer != SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
898 			rc = nvme_tcp_build_contig_request(tqpair, tcp_req);
899 		}
900 	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL) {
901 		if (xfer != SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
902 			rc = nvme_tcp_build_sgl_request(tqpair, tcp_req);
903 		}
904 	} else {
905 		rc = -1;
906 	}
907 
908 	if (rc) {
909 		return rc;
910 	}
911 
912 	if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
913 		max_in_capsule_data_size = ctrlr->ioccsz_bytes;
914 		if (spdk_unlikely((req->cmd.opc == SPDK_NVME_OPC_FABRIC) ||
915 				  nvme_qpair_is_admin_queue(&tqpair->qpair))) {
916 			max_in_capsule_data_size = SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE;
917 		}
918 
919 		if (req->payload_size <= max_in_capsule_data_size) {
920 			req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
921 			req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
922 			req->cmd.dptr.sgl1.address = 0;
923 			tcp_req->in_capsule_data = true;
924 		}
925 	}
926 
927 	return 0;
928 }
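
/* Worked example (hypothetical numbers, for illustration only): for a 4 KiB write on an I/O
 * queue whose controller reports ioccsz_bytes = 8192, payload_size (4096) fits within
 * max_in_capsule_data_size, so the SGL descriptor is rewritten as a data-block/offset
 * descriptor and the payload travels inside the command capsule. Admin and fabrics commands
 * are instead capped at SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE. */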
929 
930 static inline bool
931 nvme_tcp_req_complete_safe(struct nvme_tcp_req *tcp_req)
932 {
933 	if (!(tcp_req->ordering.bits.send_ack && tcp_req->ordering.bits.data_recv &&
934 	      !tcp_req->ordering.bits.in_progress_accel)) {
935 		return false;
936 	}
937 
938 	assert(tcp_req->state == NVME_TCP_REQ_ACTIVE);
939 	assert(tcp_req->tqpair != NULL);
940 	assert(tcp_req->req != NULL);
941 
942 	nvme_tcp_req_complete(tcp_req, tcp_req->tqpair, &tcp_req->rsp, true);
943 	return true;
944 }
945 
946 static void
947 nvme_tcp_qpair_cmd_send_complete(void *cb_arg)
948 {
949 	struct nvme_tcp_req *tcp_req = cb_arg;
950 
951 	SPDK_DEBUGLOG(nvme, "tcp req %p, cid %u, qid %u\n", tcp_req, tcp_req->cid,
952 		      tcp_req->tqpair->qpair.id);
953 	tcp_req->ordering.bits.send_ack = 1;
954 	/* Handle the r2t case */
955 	if (spdk_unlikely(tcp_req->ordering.bits.h2c_send_waiting_ack)) {
956 		SPDK_DEBUGLOG(nvme, "tcp req %p, send H2C data\n", tcp_req);
957 		nvme_tcp_send_h2c_data(tcp_req);
958 	} else {
959 		if (tcp_req->in_capsule_data && tcp_req->ordering.bits.domain_in_use) {
960 			spdk_memory_domain_invalidate_data(tcp_req->req->payload.opts->memory_domain,
961 							   tcp_req->req->payload.opts->memory_domain_ctx, tcp_req->iov, tcp_req->iovcnt);
962 		}
963 
964 		nvme_tcp_req_complete_safe(tcp_req);
965 	}
966 }
967 
968 static int
969 nvme_tcp_qpair_capsule_cmd_send(struct nvme_tcp_qpair *tqpair,
970 				struct nvme_tcp_req *tcp_req)
971 {
972 	struct nvme_tcp_pdu *pdu;
973 	struct spdk_nvme_tcp_cmd *capsule_cmd;
974 	uint32_t plen = 0, alignment;
975 	uint8_t pdo;
976 
977 	SPDK_DEBUGLOG(nvme, "enter\n");
978 	pdu = tcp_req->pdu;
979 	pdu->req = tcp_req;
980 
981 	capsule_cmd = &pdu->hdr.capsule_cmd;
982 	capsule_cmd->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
983 	plen = capsule_cmd->common.hlen = sizeof(*capsule_cmd);
984 	capsule_cmd->ccsqe = tcp_req->req->cmd;
985 
986 	SPDK_DEBUGLOG(nvme, "capsule_cmd cid=%u on tqpair(%p)\n", tcp_req->req->cmd.cid, tqpair);
987 
988 	if (tqpair->flags.host_hdgst_enable) {
989 		SPDK_DEBUGLOG(nvme, "Header digest is enabled for capsule command on tcp_req=%p\n",
990 			      tcp_req);
991 		capsule_cmd->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
992 		plen += SPDK_NVME_TCP_DIGEST_LEN;
993 	}
994 
995 	if ((tcp_req->req->payload_size == 0) || !tcp_req->in_capsule_data) {
996 		goto end;
997 	}
998 
999 	pdo = plen;
1000 	pdu->padding_len = 0;
1001 	if (tqpair->cpda) {
1002 		alignment = (tqpair->cpda + 1) << 2;
1003 		if (alignment > plen) {
1004 			pdu->padding_len = alignment - plen;
1005 			pdo = alignment;
1006 			plen = alignment;
1007 		}
1008 	}
1009 
1010 	capsule_cmd->common.pdo = pdo;
1011 	plen += tcp_req->req->payload_size;
1012 	if (tqpair->flags.host_ddgst_enable) {
1013 		capsule_cmd->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
1014 		plen += SPDK_NVME_TCP_DIGEST_LEN;
1015 	}
1016 
1017 	tcp_req->datao = 0;
1018 	nvme_tcp_pdu_set_data_buf(pdu, tcp_req->iov, tcp_req->iovcnt,
1019 				  0, tcp_req->req->payload_size);
1020 end:
1021 	capsule_cmd->common.plen = plen;
1022 	return nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_qpair_cmd_send_complete, tcp_req);
1023 
1024 }
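
/* Worked example of the CPDA padding above (hypothetical values): with header digest enabled,
 * plen is sizeof(struct spdk_nvme_tcp_cmd) plus SPDK_NVME_TCP_DIGEST_LEN. If the controller
 * advertised cpda = 31, the required alignment is (31 + 1) << 2 = 128 bytes, so padding_len
 * becomes 128 - plen and both pdo and plen are rounded up to 128 before the in-capsule data
 * and optional data digest are appended. With cpda = 0 the alignment (4) never exceeds plen,
 * so no padding is inserted. */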
1025 
1026 static int
1027 nvme_tcp_qpair_submit_request(struct spdk_nvme_qpair *qpair,
1028 			      struct nvme_request *req)
1029 {
1030 	struct nvme_tcp_qpair *tqpair;
1031 	struct nvme_tcp_req *tcp_req;
1032 
1033 	tqpair = nvme_tcp_qpair(qpair);
1034 	assert(tqpair != NULL);
1035 	assert(req != NULL);
1036 
1037 	tcp_req = nvme_tcp_req_get(tqpair);
1038 	if (!tcp_req) {
1039 		tqpair->stats->queued_requests++;
1040 		/* Inform the upper layer to try again later. */
1041 		return -EAGAIN;
1042 	}
1043 
1044 	if (spdk_unlikely(nvme_tcp_req_init(tqpair, req, tcp_req))) {
1045 		SPDK_ERRLOG("nvme_tcp_req_init() failed\n");
1046 		nvme_tcp_req_put(tqpair, tcp_req);
1047 		return -1;
1048 	}
1049 
1050 	tqpair->qpair.queue_depth++;
1051 	spdk_trace_record(TRACE_NVME_TCP_SUBMIT, qpair->id, 0, (uintptr_t)tcp_req->pdu, req->cb_arg,
1052 			  (uint32_t)req->cmd.cid, (uint32_t)req->cmd.opc,
1053 			  req->cmd.cdw10, req->cmd.cdw11, req->cmd.cdw12, tqpair->qpair.queue_depth);
1054 	TAILQ_INSERT_TAIL(&tqpair->outstanding_reqs, tcp_req, link);
1055 	return nvme_tcp_qpair_capsule_cmd_send(tqpair, tcp_req);
1056 }
1057 
1058 static int
1059 nvme_tcp_qpair_reset(struct spdk_nvme_qpair *qpair)
1060 {
1061 	return 0;
1062 }
1063 
1064 static void
1065 nvme_tcp_req_complete(struct nvme_tcp_req *tcp_req,
1066 		      struct nvme_tcp_qpair *tqpair,
1067 		      struct spdk_nvme_cpl *rsp,
1068 		      bool print_on_error)
1069 {
1070 	struct spdk_nvme_cpl	cpl;
1071 	struct spdk_nvme_qpair	*qpair;
1072 	struct nvme_request	*req;
1073 	bool			print_error;
1074 
1075 	assert(tcp_req->req != NULL);
1076 	req = tcp_req->req;
1077 	qpair = req->qpair;
1078 
1079 	SPDK_DEBUGLOG(nvme, "complete tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair);
1080 
1081 	if (!tcp_req->tqpair->qpair.in_completion_context) {
1082 		tcp_req->tqpair->async_complete++;
1083 	}
1084 
1085 	/* Cache arguments to be passed to nvme_complete_request since tcp_req can be zeroed when released */
1086 	memcpy(&cpl, rsp, sizeof(cpl));
1087 
1088 	if (spdk_unlikely(spdk_nvme_cpl_is_error(rsp))) {
1089 		print_error = print_on_error && !qpair->ctrlr->opts.disable_error_logging;
1090 
1091 		if (print_error) {
1092 			spdk_nvme_qpair_print_command(qpair, &req->cmd);
1093 		}
1094 
1095 		if (print_error || SPDK_DEBUGLOG_FLAG_ENABLED("nvme")) {
1096 			spdk_nvme_qpair_print_completion(qpair, rsp);
1097 		}
1098 	}
1099 
1100 	tqpair->qpair.queue_depth--;
1101 	spdk_trace_record(TRACE_NVME_TCP_COMPLETE, qpair->id, 0, (uintptr_t)tcp_req->pdu, req->cb_arg,
1102 			  (uint32_t)req->cmd.cid, (uint32_t)cpl.status_raw, tqpair->qpair.queue_depth);
1103 	TAILQ_REMOVE(&tcp_req->tqpair->outstanding_reqs, tcp_req, link);
1104 	nvme_tcp_req_put(tqpair, tcp_req);
1105 	nvme_complete_request(req->cb_fn, req->cb_arg, req->qpair, req, &cpl);
1106 }
1107 
1108 static void
1109 nvme_tcp_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
1110 {
1111 	struct nvme_tcp_req *tcp_req, *tmp;
1112 	struct spdk_nvme_cpl cpl = {};
1113 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
1114 
1115 	cpl.sqid = qpair->id;
1116 	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
1117 	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
1118 	cpl.status.dnr = dnr;
1119 
1120 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
1121 		/* We cannot abort requests with accel operations in progress */
1122 		if (tcp_req->ordering.bits.in_progress_accel) {
1123 			continue;
1124 		}
1125 
1126 		nvme_tcp_req_complete(tcp_req, tqpair, &cpl, true);
1127 	}
1128 }
1129 
1130 static void
1131 nvme_tcp_qpair_send_h2c_term_req_complete(void *cb_arg)
1132 {
1133 	struct nvme_tcp_qpair *tqpair = cb_arg;
1134 
1135 	tqpair->state = NVME_TCP_QPAIR_STATE_EXITING;
1136 }
1137 
1138 static void
1139 nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
1140 				 enum spdk_nvme_tcp_term_req_fes fes, uint32_t error_offset)
1141 {
1142 	struct nvme_tcp_pdu *rsp_pdu;
1143 	struct spdk_nvme_tcp_term_req_hdr *h2c_term_req;
1144 	uint32_t h2c_term_req_hdr_len = sizeof(*h2c_term_req);
1145 	uint8_t copy_len;
1146 
1147 	rsp_pdu = tqpair->send_pdu;
1148 	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
1149 	h2c_term_req = &rsp_pdu->hdr.term_req;
1150 	h2c_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
1151 	h2c_term_req->common.hlen = h2c_term_req_hdr_len;
1152 
1153 	if ((fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
1154 	    (fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
1155 		DSET32(&h2c_term_req->fei, error_offset);
1156 	}
1157 
1158 	copy_len = pdu->hdr.common.hlen;
1159 	if (copy_len > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE) {
1160 		copy_len = SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE;
1161 	}
1162 
1163 	/* Copy the error info into the buffer */
1164 	memcpy((uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len, pdu->hdr.raw, copy_len);
1165 	nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len, copy_len);
1166 
1167 	/* Include the header length of the erroneously received pdu */
1168 	h2c_term_req->common.plen = h2c_term_req->common.hlen + copy_len;
1169 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
1170 	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_send_h2c_term_req_complete, tqpair);
1171 }
1172 
1173 static bool
1174 nvme_tcp_qpair_recv_state_valid(struct nvme_tcp_qpair *tqpair)
1175 {
1176 	switch (tqpair->state) {
1177 	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND:
1178 	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL:
1179 	case NVME_TCP_QPAIR_STATE_AUTHENTICATING:
1180 	case NVME_TCP_QPAIR_STATE_RUNNING:
1181 		return true;
1182 	default:
1183 		return false;
1184 	}
1185 }
1186 
1187 static void
1188 nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
1189 {
1190 	struct nvme_tcp_pdu *pdu;
1191 	uint32_t error_offset = 0;
1192 	enum spdk_nvme_tcp_term_req_fes fes;
1193 	uint32_t expected_hlen, hd_len = 0;
1194 	bool plen_error = false;
1195 
1196 	pdu = tqpair->recv_pdu;
1197 
1198 	SPDK_DEBUGLOG(nvme, "pdu type = %d\n", pdu->hdr.common.pdu_type);
1199 	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP) {
1200 		if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
1201 			SPDK_ERRLOG("Already received IC_RESP PDU, and we should reject this pdu=%p\n", pdu);
1202 			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
1203 			goto err;
1204 		}
1205 		expected_hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
1206 		if (pdu->hdr.common.plen != expected_hlen) {
1207 			plen_error = true;
1208 		}
1209 	} else {
1210 		if (spdk_unlikely(!nvme_tcp_qpair_recv_state_valid(tqpair))) {
1211 			SPDK_ERRLOG("The TCP/IP tqpair connection is not negotiated\n");
1212 			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
1213 			goto err;
1214 		}
1215 
1216 		switch (pdu->hdr.common.pdu_type) {
1217 		case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
1218 			expected_hlen = sizeof(struct spdk_nvme_tcp_rsp);
1219 			if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
1220 				hd_len = SPDK_NVME_TCP_DIGEST_LEN;
1221 			}
1222 
1223 			if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
1224 				plen_error = true;
1225 			}
1226 			break;
1227 		case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
1228 			expected_hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
1229 			if (pdu->hdr.common.plen < pdu->hdr.common.pdo) {
1230 				plen_error = true;
1231 			}
1232 			break;
1233 		case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
1234 			expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
1235 			if ((pdu->hdr.common.plen <= expected_hlen) ||
1236 			    (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
1237 				plen_error = true;
1238 			}
1239 			break;
1240 		case SPDK_NVME_TCP_PDU_TYPE_R2T:
1241 			expected_hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
1242 			if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
1243 				hd_len = SPDK_NVME_TCP_DIGEST_LEN;
1244 			}
1245 
1246 			if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
1247 				plen_error = true;
1248 			}
1249 			break;
1250 
1251 		default:
1252 			SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu->hdr.common.pdu_type);
1253 			fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1254 			error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type);
1255 			goto err;
1256 		}
1257 	}
1258 
1259 	if (pdu->hdr.common.hlen != expected_hlen) {
1260 		SPDK_ERRLOG("Expected PDU header length %u, got %u\n",
1261 			    expected_hlen, pdu->hdr.common.hlen);
1262 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1263 		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen);
1264 		goto err;
1265 
1266 	} else if (plen_error) {
1267 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1268 		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen);
1269 		goto err;
1270 	} else {
1271 		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
1272 		nvme_tcp_pdu_calc_psh_len(tqpair->recv_pdu, tqpair->flags.host_hdgst_enable);
1273 		return;
1274 	}
1275 err:
1276 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1277 }
1278 
1279 static struct nvme_tcp_req *
1280 get_nvme_active_req_by_cid(struct nvme_tcp_qpair *tqpair, uint32_t cid)
1281 {
1282 	assert(tqpair != NULL);
1283 	if ((cid >= tqpair->num_entries) || (tqpair->tcp_reqs[cid].state == NVME_TCP_REQ_FREE)) {
1284 		return NULL;
1285 	}
1286 
1287 	return &tqpair->tcp_reqs[cid];
1288 }
1289 
1290 static void
1291 nvme_tcp_recv_payload_seq_cb(void *cb_arg, int status)
1292 {
1293 	struct nvme_tcp_req *treq = cb_arg;
1294 	struct nvme_request *req = treq->req;
1295 	struct nvme_tcp_qpair *tqpair = treq->tqpair;
1296 
1297 	assert(treq->ordering.bits.in_progress_accel);
1298 	treq->ordering.bits.in_progress_accel = 0;
1299 
1300 	nvme_tcp_cond_schedule_qpair_polling(tqpair);
1301 
1302 	req->accel_sequence = NULL;
1303 	if (spdk_unlikely(status != 0)) {
1304 		pdu_seq_fail(treq->pdu, status);
1305 		return;
1306 	}
1307 
1308 	nvme_tcp_req_complete_safe(treq);
1309 }
1310 
1311 static void
1312 nvme_tcp_c2h_data_payload_handle(struct nvme_tcp_qpair *tqpair,
1313 				 struct nvme_tcp_pdu *pdu, uint32_t *reaped)
1314 {
1315 	struct nvme_tcp_req *tcp_req;
1316 	struct nvme_tcp_poll_group *tgroup;
1317 	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
1318 	uint8_t flags;
1319 
1320 	tcp_req = pdu->req;
1321 	assert(tcp_req != NULL);
1322 
1323 	SPDK_DEBUGLOG(nvme, "enter\n");
1324 	c2h_data = &pdu->hdr.c2h_data;
1325 	tcp_req->datao += pdu->data_len;
1326 	flags = c2h_data->common.flags;
1327 
1328 	if (flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU) {
1329 		if (tcp_req->datao == tcp_req->req->payload_size) {
1330 			tcp_req->rsp.status.p = 0;
1331 		} else {
1332 			tcp_req->rsp.status.p = 1;
1333 		}
1334 
1335 		tcp_req->rsp.cid = tcp_req->cid;
1336 		tcp_req->rsp.sqid = tqpair->qpair.id;
1337 		if (flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) {
1338 			tcp_req->ordering.bits.data_recv = 1;
1339 			if (tcp_req->req->accel_sequence != NULL) {
1340 				tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
1341 				nvme_tcp_accel_reverse_sequence(tgroup, tcp_req->req->accel_sequence);
1342 				nvme_tcp_accel_finish_sequence(tgroup, tcp_req,
1343 							       tcp_req->req->accel_sequence,
1344 							       nvme_tcp_recv_payload_seq_cb,
1345 							       tcp_req);
1346 				return;
1347 			}
1348 
1349 			if (nvme_tcp_req_complete_safe(tcp_req)) {
1350 				(*reaped)++;
1351 			}
1352 		}
1353 	}
1354 }
1355 
1356 static const char *spdk_nvme_tcp_term_req_fes_str[] = {
1357 	"Invalid PDU Header Field",
1358 	"PDU Sequence Error",
1359 	"Header Digest Error",
1360 	"Data Transfer Out of Range",
1361 	"Data Transfer Limit Exceeded",
1362 	"Unsupported parameter",
1363 };
1364 
1365 static void
1366 nvme_tcp_c2h_term_req_dump(struct spdk_nvme_tcp_term_req_hdr *c2h_term_req)
1367 {
1368 	SPDK_ERRLOG("Error info of pdu(%p): %s\n", c2h_term_req,
1369 		    spdk_nvme_tcp_term_req_fes_str[c2h_term_req->fes]);
1370 	if ((c2h_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
1371 	    (c2h_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
1372 		SPDK_DEBUGLOG(nvme, "The offset from the start of the PDU header is %u\n",
1373 			      DGET32(c2h_term_req->fei));
1374 	}
1375 	/* we may also need to dump some other info here */
1376 }
1377 
1378 static void
1379 nvme_tcp_c2h_term_req_payload_handle(struct nvme_tcp_qpair *tqpair,
1380 				     struct nvme_tcp_pdu *pdu)
1381 {
1382 	nvme_tcp_c2h_term_req_dump(&pdu->hdr.term_req);
1383 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
1384 }
1385 
1386 static void
1387 _nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
1388 {
1389 	struct nvme_tcp_pdu *pdu;
1390 
1391 	assert(tqpair != NULL);
1392 	pdu = tqpair->recv_pdu;
1393 
1394 	switch (pdu->hdr.common.pdu_type) {
1395 	case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
1396 		nvme_tcp_c2h_data_payload_handle(tqpair, pdu, reaped);
1397 		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1398 		break;
1399 
1400 	case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
1401 		nvme_tcp_c2h_term_req_payload_handle(tqpair, pdu);
1402 		break;
1403 
1404 	default:
1405 		/* The code should never reach here */
1406 		SPDK_ERRLOG("The code should never reach here\n");
1407 		break;
1408 	}
1409 }
1410 
1411 static void
1412 nvme_tcp_accel_recv_compute_crc32_done(void *cb_arg, int status)
1413 {
1414 	struct nvme_tcp_req *tcp_req = cb_arg;
1415 	struct nvme_tcp_pdu *pdu;
1416 	struct nvme_tcp_qpair *tqpair;
1417 	int rc;
1418 	int dummy_reaped = 0;
1419 
1420 	pdu = tcp_req->pdu;
1421 	assert(pdu != NULL);
1422 
1423 	tqpair = tcp_req->tqpair;
1424 	assert(tqpair != NULL);
1425 
1426 	assert(tcp_req->ordering.bits.in_progress_accel);
1427 	tcp_req->ordering.bits.in_progress_accel = 0;
1428 
1429 	nvme_tcp_cond_schedule_qpair_polling(tqpair);
1430 
1431 	if (spdk_unlikely(status)) {
1432 		SPDK_ERRLOG("Failed to compute the data digest for pdu=%p\n", pdu);
1433 		tcp_req->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
1434 		goto end;
1435 	}
1436 
1437 	pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
1438 	rc = MATCH_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);
1439 	if (rc == 0) {
1440 		SPDK_ERRLOG("data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
1441 		tcp_req->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
1442 	}
1443 
1444 end:
1445 	nvme_tcp_c2h_data_payload_handle(tqpair, tcp_req->pdu, &dummy_reaped);
1446 }
1447 
1448 static void
1449 nvme_tcp_req_copy_pdu(struct nvme_tcp_req *treq, struct nvme_tcp_pdu *pdu)
1450 {
1451 	treq->pdu->hdr = pdu->hdr;
1452 	treq->pdu->req = treq;
1453 	memcpy(treq->pdu->data_digest, pdu->data_digest, sizeof(pdu->data_digest));
1454 	memcpy(treq->pdu->data_iov, pdu->data_iov, sizeof(pdu->data_iov[0]) * pdu->data_iovcnt);
1455 	treq->pdu->data_iovcnt = pdu->data_iovcnt;
1456 	treq->pdu->data_len = pdu->data_len;
1457 }
1458 
1459 static void
1460 nvme_tcp_accel_seq_recv_compute_crc32_done(void *cb_arg)
1461 {
1462 	struct nvme_tcp_req *treq = cb_arg;
1463 	struct nvme_tcp_qpair *tqpair = treq->tqpair;
1464 	struct nvme_tcp_pdu *pdu = treq->pdu;
1465 	bool result;
1466 
1467 	pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
1468 	result = MATCH_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);
1469 	if (spdk_unlikely(!result)) {
1470 		SPDK_ERRLOG("data digest error on tqpair=(%p)\n", tqpair);
1471 		treq->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
1472 	}
1473 }
1474 
1475 static bool
1476 nvme_tcp_accel_recv_compute_crc32(struct nvme_tcp_req *treq, struct nvme_tcp_pdu *pdu)
1477 {
1478 	struct nvme_tcp_qpair *tqpair = treq->tqpair;
1479 	struct nvme_tcp_poll_group *tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
1480 	struct nvme_request *req = treq->req;
1481 	int rc, dummy = 0;
1482 
1483 	/* Only support the limited case where the request has a single c2h pdu */
1484 	if (spdk_unlikely(nvme_qpair_get_state(&tqpair->qpair) < NVME_QPAIR_CONNECTED ||
1485 			  tqpair->qpair.poll_group == NULL || pdu->dif_ctx != NULL ||
1486 			  pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT != 0 ||
1487 			  pdu->data_len != req->payload_size)) {
1488 		return false;
1489 	}
1490 
1491 	if (tgroup->group.group->accel_fn_table.append_crc32c != NULL) {
1492 		nvme_tcp_req_copy_pdu(treq, pdu);
1493 		rc = nvme_tcp_accel_append_crc32c(tgroup, &req->accel_sequence,
1494 						  &treq->pdu->data_digest_crc32,
1495 						  treq->pdu->data_iov, treq->pdu->data_iovcnt, 0,
1496 						  nvme_tcp_accel_seq_recv_compute_crc32_done, treq);
1497 		if (spdk_unlikely(rc != 0)) {
1498 			/* If accel is out of resources, fall back to non-accelerated crc32 */
1499 			if (rc == -ENOMEM) {
1500 				return false;
1501 			}
1502 
1503 			SPDK_ERRLOG("Failed to append crc32c operation: %d\n", rc);
1504 			treq->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
1505 		}
1506 
1507 		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1508 		nvme_tcp_c2h_data_payload_handle(tqpair, treq->pdu, &dummy);
1509 		return true;
1510 	} else if (tgroup->group.group->accel_fn_table.submit_accel_crc32c != NULL) {
1511 		nvme_tcp_req_copy_pdu(treq, pdu);
1512 		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1513 		nvme_tcp_accel_submit_crc32c(tgroup, treq, &treq->pdu->data_digest_crc32,
1514 					     treq->pdu->data_iov, treq->pdu->data_iovcnt, 0,
1515 					     nvme_tcp_accel_recv_compute_crc32_done, treq);
1516 		return true;
1517 	}
1518 
1519 	return false;
1520 }
1521 
1522 static void
1523 nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
1524 			    uint32_t *reaped)
1525 {
1526 	int rc = 0;
1527 	struct nvme_tcp_pdu *pdu = tqpair->recv_pdu;
1528 	uint32_t crc32c;
1529 	struct nvme_tcp_req *tcp_req = pdu->req;
1530 
1531 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1532 	SPDK_DEBUGLOG(nvme, "enter\n");
1533 
1534 	/* The request can be NULL, e.g. in case of C2HTermReq */
1535 	if (spdk_likely(tcp_req != NULL)) {
1536 		tcp_req->expected_datao += pdu->data_len;
1537 	}
1538 
1539 	/* Check the data digest if needed */
1540 	if (pdu->ddgst_enable) {
1541 		/* But if the data digest is enabled, tcp_req cannot be NULL */
1542 		assert(tcp_req != NULL);
1543 		if (nvme_tcp_accel_recv_compute_crc32(tcp_req, pdu)) {
1544 			return;
1545 		}
1546 
1547 		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
1548 		crc32c = crc32c ^ SPDK_CRC32C_XOR;
1549 		rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
1550 		if (rc == 0) {
1551 			SPDK_ERRLOG("data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
1552 			tcp_req = pdu->req;
1553 			assert(tcp_req != NULL);
1554 			tcp_req->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
1555 		}
1556 	}
1557 
1558 	_nvme_tcp_pdu_payload_handle(tqpair, reaped);
1559 }
1560 
1561 static void
1562 nvme_tcp_send_icreq_complete(void *cb_arg)
1563 {
1564 	struct nvme_tcp_qpair *tqpair = cb_arg;
1565 
1566 	SPDK_DEBUGLOG(nvme, "Complete the icreq send for tqpair=%p %u\n", tqpair, tqpair->qpair.id);
1567 
1568 	tqpair->flags.icreq_send_ack = true;
1569 
1570 	if (tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING) {
1571 		SPDK_DEBUGLOG(nvme, "tqpair %p %u, finalize icresp\n", tqpair, tqpair->qpair.id);
1572 		tqpair->state = NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND;
1573 	}
1574 }
1575 
1576 static void
1577 nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair,
1578 		       struct nvme_tcp_pdu *pdu)
1579 {
1580 	struct spdk_nvme_tcp_ic_resp *ic_resp = &pdu->hdr.ic_resp;
1581 	uint32_t error_offset = 0;
1582 	enum spdk_nvme_tcp_term_req_fes fes;
1583 	int recv_buf_size;
1584 
1585 	/* Only PFV 0 is defined currently */
1586 	if (ic_resp->pfv != 0) {
1587 		SPDK_ERRLOG("Expected ICResp PFV %u, got %u\n", 0u, ic_resp->pfv);
1588 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1589 		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, pfv);
1590 		goto end;
1591 	}
1592 
1593 	if (ic_resp->maxh2cdata < NVME_TCP_PDU_H2C_MIN_DATA_SIZE) {
1594 		SPDK_ERRLOG("Expected ICResp maxh2cdata >=%u, got %u\n", NVME_TCP_PDU_H2C_MIN_DATA_SIZE,
1595 			    ic_resp->maxh2cdata);
1596 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1597 		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, maxh2cdata);
1598 		goto end;
1599 	}
1600 	tqpair->maxh2cdata = ic_resp->maxh2cdata;
1601 
1602 	if (ic_resp->cpda > SPDK_NVME_TCP_CPDA_MAX) {
1603 		SPDK_ERRLOG("Expected ICResp cpda <=%u, got %u\n", SPDK_NVME_TCP_CPDA_MAX, ic_resp->cpda);
1604 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1605 		error_offset = offsetof(struct spdk_nvme_tcp_ic_resp, cpda);
1606 		goto end;
1607 	}
1608 	tqpair->cpda = ic_resp->cpda;
1609 
1610 	tqpair->flags.host_hdgst_enable = ic_resp->dgst.bits.hdgst_enable ? true : false;
1611 	tqpair->flags.host_ddgst_enable = ic_resp->dgst.bits.ddgst_enable ? true : false;
1612 	SPDK_DEBUGLOG(nvme, "host_hdgst_enable: %u\n", tqpair->flags.host_hdgst_enable);
1613 	SPDK_DEBUGLOG(nvme, "host_ddgst_enable: %u\n", tqpair->flags.host_ddgst_enable);
1614 
1615 	/* Now that we know whether digests are enabled, properly size the receive buffer to
1616 	 * handle several incoming 4K read commands according to the SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR
1617 	 * parameter. */
1618 	recv_buf_size = 0x1000 + sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
1619 
1620 	if (tqpair->flags.host_hdgst_enable) {
1621 		recv_buf_size += SPDK_NVME_TCP_DIGEST_LEN;
1622 	}
1623 
1624 	if (tqpair->flags.host_ddgst_enable) {
1625 		recv_buf_size += SPDK_NVME_TCP_DIGEST_LEN;
1626 	}
1627 
1628 	if (spdk_sock_set_recvbuf(tqpair->sock, recv_buf_size * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR) < 0) {
1629 		SPDK_WARNLOG("Unable to allocate enough memory for receive buffer on tqpair=%p with size=%d\n",
1630 			     tqpair,
1631 			     recv_buf_size);
1632 		/* Not fatal. */
1633 	}
1634 
1635 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1636 
1637 	if (!tqpair->flags.icreq_send_ack) {
1638 		tqpair->state = NVME_TCP_QPAIR_STATE_INITIALIZING;
1639 		SPDK_DEBUGLOG(nvme, "tqpair %p %u, waiting icreq ack\n", tqpair, tqpair->qpair.id);
1640 		return;
1641 	}
1642 
1643 	tqpair->state = NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND;
1644 	return;
1645 end:
1646 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1647 }
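
/* For reference, the receive-buffer sizing above works out to roughly
 * 4096 + sizeof(struct spdk_nvme_tcp_c2h_data_hdr) bytes per in-flight 4K read, plus
 * SPDK_NVME_TCP_DIGEST_LEN for each digest that was negotiated, all multiplied by
 * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR (a small constant defined in the shared nvme_tcp
 * helpers). Failing to grow the socket buffer is treated as non-fatal: the warning above is
 * logged and the connection continues with the existing buffer size. */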
1648 
1649 static void
1650 nvme_tcp_capsule_resp_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
1651 				 uint32_t *reaped)
1652 {
1653 	struct nvme_tcp_req *tcp_req;
1654 	struct nvme_tcp_poll_group *tgroup;
1655 	struct spdk_nvme_tcp_rsp *capsule_resp = &pdu->hdr.capsule_resp;
1656 	uint32_t cid, error_offset = 0;
1657 	enum spdk_nvme_tcp_term_req_fes fes;
1658 
1659 	SPDK_DEBUGLOG(nvme, "enter\n");
1660 	cid = capsule_resp->rccqe.cid;
1661 	tcp_req = get_nvme_active_req_by_cid(tqpair, cid);
1662 
1663 	if (!tcp_req) {
1664 		SPDK_ERRLOG("no tcp_req is found with cid=%u for tqpair=%p\n", cid, tqpair);
1665 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1666 		error_offset = offsetof(struct spdk_nvme_tcp_rsp, rccqe);
1667 		goto end;
1668 	}
1669 
1670 	assert(tcp_req->req != NULL);
1671 
1672 	tcp_req->rsp = capsule_resp->rccqe;
1673 	tcp_req->ordering.bits.data_recv = 1;
1674 
1675 	/* Prepare to receive the next PDU */
1676 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1677 
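	/* If the request has an accel sequence attached (e.g. for data digest offload), reverse
	 * and finish it first; the request is completed later from the sequence callback. */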
1678 	if (tcp_req->req->accel_sequence != NULL) {
1679 		tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
1680 		nvme_tcp_accel_reverse_sequence(tgroup, tcp_req->req->accel_sequence);
1681 		nvme_tcp_accel_finish_sequence(tgroup, tcp_req, tcp_req->req->accel_sequence,
1682 					       nvme_tcp_recv_payload_seq_cb, tcp_req);
1683 		return;
1684 	}
1685 
1686 	if (nvme_tcp_req_complete_safe(tcp_req)) {
1687 		(*reaped)++;
1688 	}
1689 
1690 	return;
1691 
1692 end:
1693 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1694 }
1695 
1696 static void
1697 nvme_tcp_c2h_term_req_hdr_handle(struct nvme_tcp_qpair *tqpair,
1698 				 struct nvme_tcp_pdu *pdu)
1699 {
1700 	struct spdk_nvme_tcp_term_req_hdr *c2h_term_req = &pdu->hdr.term_req;
1701 	uint32_t error_offset = 0;
1702 	enum spdk_nvme_tcp_term_req_fes fes;
1703 
1704 	if (c2h_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) {
1705 		SPDK_ERRLOG("Fatal Error Status (FES) is unknown for c2h_term_req pdu=%p\n", pdu);
1706 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1707 		error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes);
1708 		goto end;
1709 	}
1710 
1711 	/* set the data buffer */
1712 	nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + c2h_term_req->common.hlen,
1713 			      c2h_term_req->common.plen - c2h_term_req->common.hlen);
1714 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1715 	return;
1716 end:
1717 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1718 }
1719 
1720 static void
1721 nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
1722 {
1723 	struct nvme_tcp_req *tcp_req;
1724 	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data = &pdu->hdr.c2h_data;
1725 	uint32_t error_offset = 0;
1726 	enum spdk_nvme_tcp_term_req_fes fes;
1727 	int flags = c2h_data->common.flags;
1728 	int rc;
1729 
1730 	SPDK_DEBUGLOG(nvme, "enter\n");
1731 	SPDK_DEBUGLOG(nvme, "c2h_data info on tqpair(%p): datao=%u, datal=%u, cccid=%d\n",
1732 		      tqpair, c2h_data->datao, c2h_data->datal, c2h_data->cccid);
1733 	tcp_req = get_nvme_active_req_by_cid(tqpair, c2h_data->cccid);
1734 	if (!tcp_req) {
1735 		SPDK_ERRLOG("no tcp_req found for c2hdata cid=%d\n", c2h_data->cccid);
1736 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1737 		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, cccid);
1738 		goto end;
1739 
1740 	}
1741 
1742 	SPDK_DEBUGLOG(nvme, "tcp_req(%p) on tqpair(%p): expected_datao=%u, payload_size=%u\n",
1743 		      tcp_req, tqpair, tcp_req->expected_datao, tcp_req->req->payload_size);
1744 
1745 	if (spdk_unlikely((flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) &&
1746 			  !(flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU))) {
1747 		SPDK_ERRLOG("Invalid flag flags=%d in c2h_data=%p\n", flags, c2h_data);
1748 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1749 		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, common);
1750 		goto end;
1751 	}
1752 
1753 	if (c2h_data->datal > tcp_req->req->payload_size) {
1754 		SPDK_ERRLOG("Invalid datal for tcp_req(%p), datal(%u) exceeds payload_size(%u)\n",
1755 			    tcp_req, c2h_data->datal, tcp_req->req->payload_size);
1756 		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
1757 		goto end;
1758 	}
1759 
1760 	if (tcp_req->expected_datao != c2h_data->datao) {
1761 		SPDK_ERRLOG("Invalid datao for tcp_req(%p), received datao(%u) != expected datao(%u) in tcp_req\n",
1762 			    tcp_req, c2h_data->datao, tcp_req->expected_datao);
1763 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1764 		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datao);
1765 		goto end;
1766 	}
1767 
1768 	if ((c2h_data->datao + c2h_data->datal) > tcp_req->req->payload_size) {
1769 		SPDK_ERRLOG("Invalid data range for tcp_req(%p), received (datao(%u) + datal(%u)) exceeds payload_size(%u)\n",
1770 			    tcp_req, c2h_data->datao, c2h_data->datal, tcp_req->req->payload_size);
1771 		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
1772 		error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, datal);
1773 		goto end;
1774 
1775 	}
1776 
1777 	if (nvme_payload_type(&tcp_req->req->payload) == NVME_PAYLOAD_TYPE_CONTIG) {
1778 		rc = nvme_tcp_build_contig_request(tqpair, tcp_req);
1779 	} else {
1780 		assert(nvme_payload_type(&tcp_req->req->payload) == NVME_PAYLOAD_TYPE_SGL);
1781 		rc = nvme_tcp_build_sgl_request(tqpair, tcp_req);
1782 	}
1783 
1784 	if (rc) {
1785 		/* Not the right error message but at least it handles the failure. */
1786 		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_LIMIT_EXCEEDED;
1787 		goto end;
1788 	}
1789 
1790 	nvme_tcp_pdu_set_data_buf(pdu, tcp_req->iov, tcp_req->iovcnt,
1791 				  c2h_data->datao, c2h_data->datal);
1792 	pdu->req = tcp_req;
1793 
1794 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1795 	return;
1796 
1797 end:
1798 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1799 }
1800 
1801 static void
1802 nvme_tcp_qpair_h2c_data_send_complete(void *cb_arg)
1803 {
1804 	struct nvme_tcp_req *tcp_req = cb_arg;
1805 
1806 	assert(tcp_req != NULL);
1807 
1808 	tcp_req->ordering.bits.send_ack = 1;
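	/* If this R2T has not been fully satisfied yet, send the next H2C Data PDU chunk. */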
1809 	if (tcp_req->r2tl_remain) {
1810 		nvme_tcp_send_h2c_data(tcp_req);
1811 	} else {
1812 		assert(tcp_req->active_r2ts > 0);
1813 		tcp_req->active_r2ts--;
1814 		tcp_req->state = NVME_TCP_REQ_ACTIVE;
1815 
1816 		if (tcp_req->ordering.bits.r2t_waiting_h2c_complete) {
1817 			tcp_req->ordering.bits.r2t_waiting_h2c_complete = 0;
1818 			SPDK_DEBUGLOG(nvme, "tcp_req %p: continue r2t\n", tcp_req);
1819 			assert(tcp_req->active_r2ts > 0);
1820 			tcp_req->ttag = tcp_req->ttag_r2t_next;
1821 			tcp_req->r2tl_remain = tcp_req->r2tl_remain_next;
1822 			tcp_req->state = NVME_TCP_REQ_ACTIVE_R2T;
1823 			nvme_tcp_send_h2c_data(tcp_req);
1824 			return;
1825 		}
1826 
1827 		if (tcp_req->ordering.bits.domain_in_use) {
1828 			spdk_memory_domain_invalidate_data(tcp_req->req->payload.opts->memory_domain,
1829 							   tcp_req->req->payload.opts->memory_domain_ctx, tcp_req->iov, tcp_req->iovcnt);
1830 		}
1831 
1832 		/* We also need to call this function to free the request resources. */
1833 		nvme_tcp_req_complete_safe(tcp_req);
1834 	}
1835 }
1836 
1837 static void
1838 nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req)
1839 {
1840 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(tcp_req->req->qpair);
1841 	struct nvme_tcp_pdu *rsp_pdu;
1842 	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
1843 	uint32_t plen, pdo, alignment;
1844 
1845 	/* Reinit the send_ack and h2c_send_waiting_ack bits */
1846 	tcp_req->ordering.bits.send_ack = 0;
1847 	tcp_req->ordering.bits.h2c_send_waiting_ack = 0;
1848 	rsp_pdu = tcp_req->pdu;
1849 	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
1850 	rsp_pdu->req = tcp_req;
1851 	h2c_data = &rsp_pdu->hdr.h2c_data;
1852 
1853 	h2c_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
1854 	plen = h2c_data->common.hlen = sizeof(*h2c_data);
1855 	h2c_data->cccid = tcp_req->cid;
1856 	h2c_data->ttag = tcp_req->ttag;
1857 	h2c_data->datao = tcp_req->datao;
1858 
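	/* Each H2C Data PDU carries at most maxh2cdata bytes, so a large R2T is split into
	 * multiple PDUs; the remainder is sent from the send-complete callback. */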
1859 	h2c_data->datal = spdk_min(tcp_req->r2tl_remain, tqpair->maxh2cdata);
1860 	nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->iov, tcp_req->iovcnt,
1861 				  h2c_data->datao, h2c_data->datal);
1862 	tcp_req->r2tl_remain -= h2c_data->datal;
1863 
1864 	if (tqpair->flags.host_hdgst_enable) {
1865 		h2c_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1866 		plen += SPDK_NVME_TCP_DIGEST_LEN;
1867 	}
1868 
1869 	rsp_pdu->padding_len = 0;
1870 	pdo = plen;
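	/* CPDA is a 0-based value in 4-byte units: the data must start at an offset aligned to
	 * (cpda + 1) * 4 bytes, so pad the header out to that alignment when needed. */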
1871 	if (tqpair->cpda) {
1872 		alignment = (tqpair->cpda + 1) << 2;
1873 		if (alignment > plen) {
1874 			rsp_pdu->padding_len = alignment - plen;
1875 			pdo = plen = alignment;
1876 		}
1877 	}
1878 
1879 	h2c_data->common.pdo = pdo;
1880 	plen += h2c_data->datal;
1881 	if (tqpair->flags.host_ddgst_enable) {
1882 		h2c_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
1883 		plen += SPDK_NVME_TCP_DIGEST_LEN;
1884 	}
1885 
1886 	h2c_data->common.plen = plen;
1887 	tcp_req->datao += h2c_data->datal;
1888 	if (!tcp_req->r2tl_remain) {
1889 		h2c_data->common.flags |= SPDK_NVME_TCP_H2C_DATA_FLAGS_LAST_PDU;
1890 	}
1891 
1892 	SPDK_DEBUGLOG(nvme, "h2c_data info: datao=%u, datal=%u, pdu_len=%u for tqpair=%p\n",
1893 		      h2c_data->datao, h2c_data->datal, h2c_data->common.plen, tqpair);
1894 
1895 	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_h2c_data_send_complete, tcp_req);
1896 }
1897 
1898 static void
1899 nvme_tcp_r2t_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
1900 {
1901 	struct nvme_tcp_req *tcp_req;
1902 	struct spdk_nvme_tcp_r2t_hdr *r2t = &pdu->hdr.r2t;
1903 	uint32_t cid, error_offset = 0;
1904 	enum spdk_nvme_tcp_term_req_fes fes;
1905 
1906 	SPDK_DEBUGLOG(nvme, "enter\n");
1907 	cid = r2t->cccid;
1908 	tcp_req = get_nvme_active_req_by_cid(tqpair, cid);
1909 	if (!tcp_req) {
1910 		SPDK_ERRLOG("Cannot find tcp_req with cid=%u for tqpair=%p\n", cid, tqpair);
1911 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1912 		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, cccid);
1913 		goto end;
1914 	}
1915 
1916 	SPDK_DEBUGLOG(nvme, "r2t info: r2to=%u, r2tl=%u for tqpair=%p\n", r2t->r2to, r2t->r2tl,
1917 		      tqpair);
1918 
1919 	if (tcp_req->state == NVME_TCP_REQ_ACTIVE) {
1920 		assert(tcp_req->active_r2ts == 0);
1921 		tcp_req->state = NVME_TCP_REQ_ACTIVE_R2T;
1922 	}
1923 
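	/* The R2T offset must match the amount of data we have already sent for this request. */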
1924 	if (tcp_req->datao != r2t->r2to) {
1925 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1926 		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, r2to);
1927 		goto end;
1928 
1929 	}
1930 
1931 	if ((r2t->r2tl + r2t->r2to) > tcp_req->req->payload_size) {
1932 		SPDK_ERRLOG("Invalid R2T info for tcp_req=%p: (r2to(%u) + r2tl(%u)) exceeds payload_size(%u)\n",
1933 			    tcp_req, r2t->r2to, r2t->r2tl, tcp_req->req->payload_size);
1934 		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
1935 		error_offset = offsetof(struct spdk_nvme_tcp_r2t_hdr, r2tl);
1936 		goto end;
1937 	}
1938 
1939 	tcp_req->active_r2ts++;
1940 	if (spdk_unlikely(tcp_req->active_r2ts > tqpair->maxr2t)) {
1941 		if (tcp_req->state == NVME_TCP_REQ_ACTIVE_R2T && !tcp_req->ordering.bits.send_ack) {
1942 			/* We receive a subsequent R2T while we are waiting for H2C transfer to complete */
1943 			SPDK_DEBUGLOG(nvme, "received a subsequent R2T\n");
1944 			assert(tcp_req->active_r2ts == tqpair->maxr2t + 1);
1945 			tcp_req->ttag_r2t_next = r2t->ttag;
1946 			tcp_req->r2tl_remain_next = r2t->r2tl;
1947 			tcp_req->ordering.bits.r2t_waiting_h2c_complete = 1;
1948 			nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1949 			return;
1950 		} else {
1951 			fes = SPDK_NVME_TCP_TERM_REQ_FES_R2T_LIMIT_EXCEEDED;
1952 			SPDK_ERRLOG("Invalid R2T: Maximum number of R2T exceeded! Max: %u for tqpair=%p\n", tqpair->maxr2t,
1953 				    tqpair);
1954 			goto end;
1955 		}
1956 	}
1957 
1958 	tcp_req->ttag = r2t->ttag;
1959 	tcp_req->r2tl_remain = r2t->r2tl;
1960 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1961 
1962 	if (spdk_likely(tcp_req->ordering.bits.send_ack)) {
1963 		nvme_tcp_send_h2c_data(tcp_req);
1964 	} else {
1965 		tcp_req->ordering.bits.h2c_send_waiting_ack = 1;
1966 	}
1967 
1968 	return;
1969 
1970 end:
1971 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1972 
1973 }
1974 
1975 static void
1976 nvme_tcp_pdu_psh_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
1977 {
1978 	struct nvme_tcp_pdu *pdu;
1979 	int rc;
1980 	uint32_t crc32c, error_offset = 0;
1981 	enum spdk_nvme_tcp_term_req_fes fes;
1982 
1983 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
1984 	pdu = tqpair->recv_pdu;
1985 
1986 	SPDK_DEBUGLOG(nvme, "enter: pdu type =%u\n", pdu->hdr.common.pdu_type);
1987 	/* check header digest if needed */
1988 	if (pdu->has_hdgst) {
1989 		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
1990 		rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
1991 		if (rc == 0) {
1992 			SPDK_ERRLOG("header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
1993 			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
1994 			nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1995 			return;
1996 
1997 		}
1998 	}
1999 
2000 	switch (pdu->hdr.common.pdu_type) {
2001 	case SPDK_NVME_TCP_PDU_TYPE_IC_RESP:
2002 		nvme_tcp_icresp_handle(tqpair, pdu);
2003 		break;
2004 	case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
2005 		nvme_tcp_capsule_resp_hdr_handle(tqpair, pdu, reaped);
2006 		break;
2007 	case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
2008 		nvme_tcp_c2h_data_hdr_handle(tqpair, pdu);
2009 		break;
2010 
2011 	case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
2012 		nvme_tcp_c2h_term_req_hdr_handle(tqpair, pdu);
2013 		break;
2014 	case SPDK_NVME_TCP_PDU_TYPE_R2T:
2015 		nvme_tcp_r2t_hdr_handle(tqpair, pdu);
2016 		break;
2017 
2018 	default:
2019 		SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu->hdr.common.pdu_type);
2020 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
2021 		error_offset = 1;
2022 		nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
2023 		break;
2024 	}
2025 
2026 }
2027 
2028 static int
2029 nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_completions)
2030 {
2031 	int rc = 0;
2032 	struct nvme_tcp_pdu *pdu;
2033 	uint32_t data_len;
2034 	enum nvme_tcp_pdu_recv_state prev_state;
2035 
2036 	*reaped = tqpair->async_complete;
2037 	tqpair->async_complete = 0;
2038 
2039 	/* The loop here is to allow for several back-to-back state changes. */
2040 	do {
2041 		if (*reaped >= max_completions) {
2042 			break;
2043 		}
2044 
2045 		prev_state = tqpair->recv_state;
2046 		pdu = tqpair->recv_pdu;
2047 		switch (tqpair->recv_state) {
2048 		/* If in a new state */
2049 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
2050 			memset(pdu, 0, sizeof(struct nvme_tcp_pdu));
2051 			nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
2052 			break;
2053 		/* Wait for the pdu common header */
2054 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
2055 			assert(pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr));
2056 			rc = nvme_tcp_read_data(tqpair->sock,
2057 						sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
2058 						(uint8_t *)&pdu->hdr.common + pdu->ch_valid_bytes);
2059 			if (rc < 0) {
2060 				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
2061 				break;
2062 			}
2063 			pdu->ch_valid_bytes += rc;
2064 			if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
2065 				return NVME_TCP_PDU_IN_PROGRESS;
2066 			}
2067 
2068 			/* The common header of this PDU has now been read from the socket. */
2069 			nvme_tcp_pdu_ch_handle(tqpair);
2070 			break;
2071 		/* Wait for the pdu specific header */
2072 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
2073 			assert(pdu->psh_valid_bytes < pdu->psh_len);
2074 			rc = nvme_tcp_read_data(tqpair->sock,
2075 						pdu->psh_len - pdu->psh_valid_bytes,
2076 						(uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
2077 			if (rc < 0) {
2078 				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
2079 				break;
2080 			}
2081 
2082 			pdu->psh_valid_bytes += rc;
2083 			if (pdu->psh_valid_bytes < pdu->psh_len) {
2084 				return NVME_TCP_PDU_IN_PROGRESS;
2085 			}
2086 
2087 			/* All headers (CH, PSH, and header digest) of this PDU have now been read from the socket. */
2088 			nvme_tcp_pdu_psh_handle(tqpair, reaped);
2089 			break;
2090 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
2091 			/* Check whether the data length is valid; if not, just return. */
2092 			if (!pdu->data_len) {
2093 				return NVME_TCP_PDU_IN_PROGRESS;
2094 			}
2095 
2096 			data_len = pdu->data_len;
2097 			/* data digest */
2098 			if (spdk_unlikely((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) &&
2099 					  tqpair->flags.host_ddgst_enable)) {
2100 				data_len += SPDK_NVME_TCP_DIGEST_LEN;
2101 				pdu->ddgst_enable = true;
2102 			}
2103 
2104 			rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
2105 			if (rc < 0) {
2106 				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
2107 				break;
2108 			}
2109 
2110 			pdu->rw_offset += rc;
2111 			if (pdu->rw_offset < data_len) {
2112 				return NVME_TCP_PDU_IN_PROGRESS;
2113 			}
2114 
2115 			assert(pdu->rw_offset == data_len);
2116 			/* All of this PDU has now been read from the socket. */
2117 			nvme_tcp_pdu_payload_handle(tqpair, reaped);
2118 			break;
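		/* In the quiescing state, wait for all outstanding requests to complete before
		 * finishing a pending disconnect and entering the error state. */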
2119 		case NVME_TCP_PDU_RECV_STATE_QUIESCING:
2120 			if (TAILQ_EMPTY(&tqpair->outstanding_reqs)) {
2121 				if (nvme_qpair_get_state(&tqpair->qpair) == NVME_QPAIR_DISCONNECTING) {
2122 					nvme_transport_ctrlr_disconnect_qpair_done(&tqpair->qpair);
2123 				}
2124 
2125 				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
2126 			}
2127 			break;
2128 		case NVME_TCP_PDU_RECV_STATE_ERROR:
2129 			memset(pdu, 0, sizeof(struct nvme_tcp_pdu));
2130 			return NVME_TCP_PDU_FATAL;
2131 		default:
2132 			assert(0);
2133 			break;
2134 		}
2135 	} while (prev_state != tqpair->recv_state);
2136 
2137 	return rc > 0 ? 0 : rc;
2138 }
2139 
2140 static void
2141 nvme_tcp_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
2142 {
2143 	uint64_t t02;
2144 	struct nvme_tcp_req *tcp_req, *tmp;
2145 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2146 	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
2147 	struct spdk_nvme_ctrlr_process *active_proc;
2148 
2149 	/* Don't check timeouts during controller initialization. */
2150 	if (ctrlr->state != NVME_CTRLR_STATE_READY) {
2151 		return;
2152 	}
2153 
2154 	if (nvme_qpair_is_admin_queue(qpair)) {
2155 		active_proc = nvme_ctrlr_get_current_process(ctrlr);
2156 	} else {
2157 		active_proc = qpair->active_proc;
2158 	}
2159 
2160 	/* Only check timeouts if the current process has a timeout callback. */
2161 	if (active_proc == NULL || active_proc->timeout_cb_fn == NULL) {
2162 		return;
2163 	}
2164 
2165 	t02 = spdk_get_ticks();
2166 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
2167 		if (ctrlr->is_failed) {
2168 			/* The controller state may have been changed to failed by one of the nvme_request_check_timeout callbacks. */
2169 			return;
2170 		}
2171 		assert(tcp_req->req != NULL);
2172 
2173 		if (nvme_request_check_timeout(tcp_req->req, tcp_req->cid, active_proc, t02)) {
2174 			/*
2175 			 * The requests are in order, so as soon as one has not timed out,
2176 			 * stop iterating.
2177 			 */
2178 			break;
2179 		}
2180 	}
2181 }
2182 
2183 static int nvme_tcp_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr,
2184 		struct spdk_nvme_qpair *qpair);
2185 
2186 static int
2187 nvme_tcp_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
2188 {
2189 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2190 	uint32_t reaped;
2191 	int rc;
2192 
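	/* Without a poll group, no socket group polls this connection, so explicitly flush any
	 * queued writes on the socket here. */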
2193 	if (qpair->poll_group == NULL) {
2194 		rc = spdk_sock_flush(tqpair->sock);
2195 		if (rc < 0 && errno != EAGAIN) {
2196 			SPDK_ERRLOG("Failed to flush tqpair=%p (%d): %s\n", tqpair,
2197 				    errno, spdk_strerror(errno));
2198 			if (spdk_unlikely(tqpair->qpair.ctrlr->timeout_enabled)) {
2199 				nvme_tcp_qpair_check_timeout(qpair);
2200 			}
2201 
2202 			if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
2203 				if (TAILQ_EMPTY(&tqpair->outstanding_reqs)) {
2204 					nvme_transport_ctrlr_disconnect_qpair_done(qpair);
2205 				}
2206 
2207 				/* Don't return errors until the qpair gets disconnected */
2208 				return 0;
2209 			}
2210 
2211 			goto fail;
2212 		}
2213 	}
2214 
2215 	if (max_completions == 0) {
2216 		max_completions = spdk_max(tqpair->num_entries, 1);
2217 	} else {
2218 		max_completions = spdk_min(max_completions, tqpair->num_entries);
2219 	}
2220 
2221 	reaped = 0;
2222 	rc = nvme_tcp_read_pdu(tqpair, &reaped, max_completions);
2223 	if (rc < 0) {
2224 		SPDK_DEBUGLOG(nvme, "Error polling CQ! (%d): %s\n",
2225 			      errno, spdk_strerror(errno));
2226 		goto fail;
2227 	}
2228 
2229 	if (spdk_unlikely(tqpair->qpair.ctrlr->timeout_enabled)) {
2230 		nvme_tcp_qpair_check_timeout(qpair);
2231 	}
2232 
2233 	if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING)) {
2234 		rc = nvme_tcp_ctrlr_connect_qpair_poll(qpair->ctrlr, qpair);
2235 		if (rc != 0 && rc != -EAGAIN) {
2236 			SPDK_ERRLOG("Failed to connect tqpair=%p\n", tqpair);
2237 			goto fail;
2238 		} else if (rc == 0) {
2239 			/* Once the connection is completed, we can submit queued requests */
2240 			nvme_qpair_resubmit_requests(qpair, tqpair->num_entries);
2241 		}
2242 	}
2243 
2244 	return reaped;
2245 fail:
2246 
2247 	/*
2248 	 * Since admin queues take the ctrlr_lock before entering this function,
2249 	 * we can call nvme_transport_ctrlr_disconnect_qpair. For other qpairs we need
2250 	 * to call the generic function which will take the lock for us.
2251 	 */
2252 	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2253 
2254 	if (nvme_qpair_is_admin_queue(qpair)) {
2255 		enum nvme_qpair_state state_prev = nvme_qpair_get_state(qpair);
2256 
2257 		nvme_transport_ctrlr_disconnect_qpair(qpair->ctrlr, qpair);
2258 
2259 		if (state_prev == NVME_QPAIR_CONNECTING && qpair->poll_status != NULL) {
2260 			/* Needed to free the poll_status */
2261 			nvme_tcp_ctrlr_connect_qpair_poll(qpair->ctrlr, qpair);
2262 		}
2263 	} else {
2264 		nvme_ctrlr_disconnect_qpair(qpair);
2265 	}
2266 	return -ENXIO;
2267 }
2268 
2269 static void
2270 nvme_tcp_qpair_sock_cb(void *ctx, struct spdk_sock_group *group, struct spdk_sock *sock)
2271 {
2272 	struct spdk_nvme_qpair *qpair = ctx;
2273 	struct nvme_tcp_poll_group *pgroup = nvme_tcp_poll_group(qpair->poll_group);
2274 	int32_t num_completions;
2275 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2276 
2277 	if (tqpair->needs_poll) {
2278 		TAILQ_REMOVE(&pgroup->needs_poll, tqpair, link);
2279 		tqpair->needs_poll = false;
2280 	}
2281 
2282 	num_completions = spdk_nvme_qpair_process_completions(qpair, pgroup->completions_per_qpair);
2283 
2284 	if (pgroup->num_completions >= 0 && num_completions >= 0) {
2285 		pgroup->num_completions += num_completions;
2286 		pgroup->stats.nvme_completions += num_completions;
2287 	} else {
2288 		pgroup->num_completions = -ENXIO;
2289 	}
2290 }
2291 
2292 static int
2293 nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
2294 {
2295 	struct spdk_nvme_tcp_ic_req *ic_req;
2296 	struct nvme_tcp_pdu *pdu;
2297 	uint32_t timeout_in_sec;
2298 
2299 	pdu = tqpair->send_pdu;
2300 	memset(tqpair->send_pdu, 0, sizeof(*tqpair->send_pdu));
2301 	ic_req = &pdu->hdr.ic_req;
2302 
2303 	ic_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
2304 	ic_req->common.hlen = ic_req->common.plen = sizeof(*ic_req);
2305 	ic_req->pfv = 0;
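	/* maxr2t is a 0-based value in the ICReq, hence the minus one. */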
2306 	ic_req->maxr2t = NVME_TCP_MAX_R2T_DEFAULT - 1;
2307 	ic_req->hpda = NVME_TCP_HPDA_DEFAULT;
2308 
2309 	ic_req->dgst.bits.hdgst_enable = tqpair->qpair.ctrlr->opts.header_digest;
2310 	ic_req->dgst.bits.ddgst_enable = tqpair->qpair.ctrlr->opts.data_digest;
2311 
2312 	nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_send_icreq_complete, tqpair);
2313 
2314 	timeout_in_sec = tqpair->qpair.async ? ICREQ_TIMEOUT_ASYNC : ICREQ_TIMEOUT_SYNC;
2315 	tqpair->icreq_timeout_tsc = spdk_get_ticks() + (timeout_in_sec * spdk_get_ticks_hz());
2316 	return 0;
2317 }
2318 
2319 static int
2320 nvme_tcp_qpair_connect_sock(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
2321 {
2322 	struct sockaddr_storage dst_addr;
2323 	struct sockaddr_storage src_addr;
2324 	int rc;
2325 	struct nvme_tcp_qpair *tqpair;
2326 	int family;
2327 	long int port, src_port = 0;
2328 	char *sock_impl_name;
2329 	struct spdk_sock_impl_opts impl_opts = {};
2330 	size_t impl_opts_size = sizeof(impl_opts);
2331 	struct spdk_sock_opts opts;
2332 	struct nvme_tcp_ctrlr *tcp_ctrlr;
2333 
2334 	tqpair = nvme_tcp_qpair(qpair);
2335 
2336 	switch (ctrlr->trid.adrfam) {
2337 	case SPDK_NVMF_ADRFAM_IPV4:
2338 		family = AF_INET;
2339 		break;
2340 	case SPDK_NVMF_ADRFAM_IPV6:
2341 		family = AF_INET6;
2342 		break;
2343 	default:
2344 		SPDK_ERRLOG("Unhandled ADRFAM %d\n", ctrlr->trid.adrfam);
2345 		rc = -1;
2346 		return rc;
2347 	}
2348 
2349 	SPDK_DEBUGLOG(nvme, "adrfam %d ai_family %d\n", ctrlr->trid.adrfam, family);
2350 
2351 	memset(&dst_addr, 0, sizeof(dst_addr));
2352 
2353 	SPDK_DEBUGLOG(nvme, "trsvcid is %s\n", ctrlr->trid.trsvcid);
2354 	rc = nvme_parse_addr(&dst_addr, family, ctrlr->trid.traddr, ctrlr->trid.trsvcid, &port);
2355 	if (rc != 0) {
2356 		SPDK_ERRLOG("dst_addr nvme_parse_addr() failed\n");
2357 		return rc;
2358 	}
2359 
2360 	if (ctrlr->opts.src_addr[0] || ctrlr->opts.src_svcid[0]) {
2361 		memset(&src_addr, 0, sizeof(src_addr));
2362 		rc = nvme_parse_addr(&src_addr, family,
2363 				     ctrlr->opts.src_addr[0] ? ctrlr->opts.src_addr : NULL,
2364 				     ctrlr->opts.src_svcid[0] ? ctrlr->opts.src_svcid : NULL,
2365 				     &src_port);
2366 		if (rc != 0) {
2367 			SPDK_ERRLOG("src_addr nvme_parse_addr() failed\n");
2368 			return rc;
2369 		}
2370 	}
2371 
2372 	tcp_ctrlr = SPDK_CONTAINEROF(ctrlr, struct nvme_tcp_ctrlr, ctrlr);
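	/* Use the "ssl" sock implementation only when a PSK is configured; otherwise let the
	 * default implementation be selected. */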
2373 	sock_impl_name = tcp_ctrlr->psk[0] ? "ssl" : NULL;
2374 	SPDK_DEBUGLOG(nvme, "sock_impl_name is %s\n", sock_impl_name);
2375 
2376 	if (sock_impl_name) {
2377 		spdk_sock_impl_get_opts(sock_impl_name, &impl_opts, &impl_opts_size);
2378 		impl_opts.tls_version = SPDK_TLS_VERSION_1_3;
2379 		impl_opts.psk_identity = tcp_ctrlr->psk_identity;
2380 		impl_opts.psk_key = tcp_ctrlr->psk;
2381 		impl_opts.psk_key_size = tcp_ctrlr->psk_size;
2382 		impl_opts.tls_cipher_suites = tcp_ctrlr->tls_cipher_suite;
2383 	}
2384 	opts.opts_size = sizeof(opts);
2385 	spdk_sock_get_default_opts(&opts);
2386 	opts.priority = ctrlr->trid.priority;
2387 	opts.zcopy = !nvme_qpair_is_admin_queue(qpair);
2388 	opts.src_addr = ctrlr->opts.src_addr[0] ? ctrlr->opts.src_addr : NULL;
2389 	opts.src_port = src_port;
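	/* transport_ack_timeout is treated as an exponent: the socket ack timeout is
	 * 2^transport_ack_timeout (the value is clamped to NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT
	 * when the controller is constructed). */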
2390 	if (ctrlr->opts.transport_ack_timeout) {
2391 		opts.ack_timeout = 1ULL << ctrlr->opts.transport_ack_timeout;
2392 	}
2393 	if (sock_impl_name) {
2394 		opts.impl_opts = &impl_opts;
2395 		opts.impl_opts_size = sizeof(impl_opts);
2396 	}
2397 	tqpair->sock = spdk_sock_connect_ext(ctrlr->trid.traddr, port, sock_impl_name, &opts);
2398 	if (!tqpair->sock) {
2399 		SPDK_ERRLOG("sock connection failed for tqpair=%p with addr=%s, port=%ld\n",
2400 			    tqpair, ctrlr->trid.traddr, port);
2401 		rc = -1;
2402 		return rc;
2403 	}
2404 
2405 	return 0;
2406 }
2407 
2408 static int
2409 nvme_tcp_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
2410 {
2411 	struct nvme_tcp_qpair *tqpair;
2412 	int rc;
2413 
2414 	tqpair = nvme_tcp_qpair(qpair);
2415 
2416 	/* Prevent this function from being called recursively, as it could lead to issues with
2417 	 * nvme_fabric_qpair_connect_poll() if the connect response is received in the recursive
2418 	 * call.
2419 	 */
2420 	if (tqpair->flags.in_connect_poll) {
2421 		return -EAGAIN;
2422 	}
2423 
2424 	tqpair->flags.in_connect_poll = 1;
2425 
2426 	switch (tqpair->state) {
2427 	case NVME_TCP_QPAIR_STATE_INVALID:
2428 	case NVME_TCP_QPAIR_STATE_INITIALIZING:
2429 		if (spdk_get_ticks() > tqpair->icreq_timeout_tsc) {
2430 			SPDK_ERRLOG("Failed to construct tqpair=%p: timed out waiting for ICResp\n", tqpair);
2431 			rc = -ETIMEDOUT;
2432 			break;
2433 		}
2434 		rc = -EAGAIN;
2435 		break;
2436 	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND:
2437 		rc = nvme_fabric_qpair_connect_async(&tqpair->qpair, tqpair->num_entries + 1);
2438 		if (rc < 0) {
2439 			SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
2440 			break;
2441 		}
2442 		tqpair->state = NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL;
2443 		rc = -EAGAIN;
2444 		break;
2445 	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL:
2446 		rc = nvme_fabric_qpair_connect_poll(&tqpair->qpair);
2447 		if (rc == 0) {
2448 			if (nvme_fabric_qpair_auth_required(qpair)) {
2449 				rc = nvme_fabric_qpair_authenticate_async(qpair);
2450 				if (rc == 0) {
2451 					tqpair->state = NVME_TCP_QPAIR_STATE_AUTHENTICATING;
2452 					rc = -EAGAIN;
2453 				}
2454 			} else {
2455 				tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
2456 				nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
2457 			}
2458 		} else if (rc != -EAGAIN) {
2459 			SPDK_ERRLOG("Failed to poll NVMe-oF Fabric CONNECT command\n");
2460 		}
2461 		break;
2462 	case NVME_TCP_QPAIR_STATE_AUTHENTICATING:
2463 		rc = nvme_fabric_qpair_authenticate_poll(qpair);
2464 		if (rc == 0) {
2465 			tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
2466 			nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
2467 		}
2468 		break;
2469 	case NVME_TCP_QPAIR_STATE_RUNNING:
2470 		rc = 0;
2471 		break;
2472 	default:
2473 		assert(false);
2474 		rc = -EINVAL;
2475 		break;
2476 	}
2477 
2478 	tqpair->flags.in_connect_poll = 0;
2479 	return rc;
2480 }
2481 
2482 static int
2483 nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
2484 {
2485 	int rc = 0;
2486 	struct nvme_tcp_qpair *tqpair;
2487 	struct nvme_tcp_poll_group *tgroup;
2488 
2489 	tqpair = nvme_tcp_qpair(qpair);
2490 
2491 	if (!tqpair->sock) {
2492 		rc = nvme_tcp_qpair_connect_sock(ctrlr, qpair);
2493 		if (rc < 0) {
2494 			return rc;
2495 		}
2496 	}
2497 
2498 	if (qpair->poll_group) {
2499 		rc = nvme_poll_group_connect_qpair(qpair);
2500 		if (rc) {
2501 			SPDK_ERRLOG("Unable to activate the tcp qpair.\n");
2502 			return rc;
2503 		}
2504 		tgroup = nvme_tcp_poll_group(qpair->poll_group);
2505 		tqpair->stats = &tgroup->stats;
2506 		tqpair->shared_stats = true;
2507 	} else {
2508 		/* When resetting a controller, we disconnect the adminq and then reconnect. The stats
2509 		 * are not freed when disconnecting, so don't allocate memory again when
2510 		 * reconnecting.
2511 		 */
2512 		if (tqpair->stats == NULL) {
2513 			tqpair->stats = calloc(1, sizeof(*tqpair->stats));
2514 			if (!tqpair->stats) {
2515 				SPDK_ERRLOG("tcp stats memory allocation failed\n");
2516 				return -ENOMEM;
2517 			}
2518 		}
2519 	}
2520 
2521 	tqpair->maxr2t = NVME_TCP_MAX_R2T_DEFAULT;
2522 	/* Explicitly set the state and recv_state of tqpair */
2523 	tqpair->state = NVME_TCP_QPAIR_STATE_INVALID;
2524 	if (tqpair->recv_state != NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY) {
2525 		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
2526 	}
2527 	rc = nvme_tcp_qpair_icreq_send(tqpair);
2528 	if (rc != 0) {
2529 		SPDK_ERRLOG("Unable to connect the tqpair\n");
2530 		return rc;
2531 	}
2532 
2533 	return rc;
2534 }
2535 
2536 static struct spdk_nvme_qpair *
2537 nvme_tcp_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr,
2538 			    uint16_t qid, uint32_t qsize,
2539 			    enum spdk_nvme_qprio qprio,
2540 			    uint32_t num_requests, bool async)
2541 {
2542 	struct nvme_tcp_qpair *tqpair;
2543 	struct spdk_nvme_qpair *qpair;
2544 	int rc;
2545 
2546 	if (qsize < SPDK_NVME_QUEUE_MIN_ENTRIES) {
2547 		SPDK_ERRLOG("Failed to create qpair with size %u. Minimum queue size is %d.\n",
2548 			    qsize, SPDK_NVME_QUEUE_MIN_ENTRIES);
2549 		return NULL;
2550 	}
2551 
2552 	tqpair = calloc(1, sizeof(struct nvme_tcp_qpair));
2553 	if (!tqpair) {
2554 		SPDK_ERRLOG("failed to allocate tqpair\n");
2555 		return NULL;
2556 	}
2557 
2558 	/* Set num_entries to one less than the queue size. According to the NVMe
2559 	 * and NVMe-oF specs, we cannot have "queue size" requests outstanding at once;
2560 	 * one slot must always remain empty.
2561 	 */
2562 	tqpair->num_entries = qsize - 1;
2563 	qpair = &tqpair->qpair;
2564 	rc = nvme_qpair_init(qpair, qid, ctrlr, qprio, num_requests, async);
2565 	if (rc != 0) {
2566 		free(tqpair);
2567 		return NULL;
2568 	}
2569 
2570 	rc = nvme_tcp_alloc_reqs(tqpair);
2571 	if (rc) {
2572 		nvme_tcp_ctrlr_delete_io_qpair(ctrlr, qpair);
2573 		return NULL;
2574 	}
2575 
2576 	/* spdk_nvme_qpair_get_optimal_poll_group needs socket information.
2577 	 * So create the socket first when creating a qpair. */
2578 	rc = nvme_tcp_qpair_connect_sock(ctrlr, qpair);
2579 	if (rc) {
2580 		nvme_tcp_ctrlr_delete_io_qpair(ctrlr, qpair);
2581 		return NULL;
2582 	}
2583 
2584 	return qpair;
2585 }
2586 
2587 static struct spdk_nvme_qpair *
2588 nvme_tcp_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
2589 			       const struct spdk_nvme_io_qpair_opts *opts)
2590 {
2591 	return nvme_tcp_ctrlr_create_qpair(ctrlr, qid, opts->io_queue_size, opts->qprio,
2592 					   opts->io_queue_requests, opts->async_mode);
2593 }
2594 
2595 static int
2596 nvme_tcp_generate_tls_credentials(struct nvme_tcp_ctrlr *tctrlr)
2597 {
2598 	struct spdk_nvme_ctrlr *ctrlr = &tctrlr->ctrlr;
2599 	int rc;
2600 	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
2601 	uint8_t psk_configured[SPDK_TLS_PSK_MAX_LEN] = {};
2602 	uint8_t pskbuf[SPDK_TLS_PSK_MAX_LEN + 1] = {};
2603 	uint8_t tls_cipher_suite;
2604 	uint8_t psk_retained_hash;
2605 	uint64_t psk_configured_size;
2606 
2607 	rc = spdk_key_get_key(ctrlr->opts.tls_psk, pskbuf, SPDK_TLS_PSK_MAX_LEN);
2608 	if (rc < 0) {
2609 		SPDK_ERRLOG("Failed to obtain key '%s': %s\n",
2610 			    spdk_key_get_name(ctrlr->opts.tls_psk), spdk_strerror(-rc));
2611 		goto finish;
2612 	}
2613 
2614 	rc = nvme_tcp_parse_interchange_psk(pskbuf, psk_configured, sizeof(psk_configured),
2615 					    &psk_configured_size, &psk_retained_hash);
2616 	if (rc < 0) {
2617 		SPDK_ERRLOG("Failed to parse PSK interchange!\n");
2618 		goto finish;
2619 	}
2620 
2621 	/* The Base64 string encodes the configured PSK (32 or 48 bytes binary).
2622 	 * This check also ensures that psk_configured_size is smaller than
2623 	 * psk_retained buffer size. */
2624 	if (psk_configured_size == SHA256_DIGEST_LENGTH) {
2625 		tls_cipher_suite = NVME_TCP_CIPHER_AES_128_GCM_SHA256;
2626 		tctrlr->tls_cipher_suite = "TLS_AES_128_GCM_SHA256";
2627 	} else if (psk_configured_size == SHA384_DIGEST_LENGTH) {
2628 		tls_cipher_suite = NVME_TCP_CIPHER_AES_256_GCM_SHA384;
2629 		tctrlr->tls_cipher_suite = "TLS_AES_256_GCM_SHA384";
2630 	} else {
2631 		SPDK_ERRLOG("Unrecognized cipher suite!\n");
2632 		rc = -ENOTSUP;
2633 		goto finish;
2634 	}
2635 
2636 	rc = nvme_tcp_generate_psk_identity(tctrlr->psk_identity, sizeof(tctrlr->psk_identity),
2637 					    ctrlr->opts.hostnqn, ctrlr->trid.subnqn,
2638 					    tls_cipher_suite);
2639 	if (rc) {
2640 		SPDK_ERRLOG("could not generate PSK identity\n");
2641 		goto finish;
2642 	}
2643 
2644 	/* No hash indicates that Configured PSK must be used as Retained PSK. */
2645 	if (psk_retained_hash == NVME_TCP_HASH_ALGORITHM_NONE) {
2646 		assert(psk_configured_size < sizeof(psk_retained));
2647 		memcpy(psk_retained, psk_configured, psk_configured_size);
2648 		rc = psk_configured_size;
2649 	} else {
2650 		/* Derive retained PSK. */
2651 		rc = nvme_tcp_derive_retained_psk(psk_configured, psk_configured_size, ctrlr->opts.hostnqn,
2652 						  psk_retained, sizeof(psk_retained), psk_retained_hash);
2653 		if (rc < 0) {
2654 			SPDK_ERRLOG("Unable to derive retained PSK!\n");
2655 			goto finish;
2656 		}
2657 	}
2658 
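	/* Derive the final TLS PSK from the retained PSK; at this point rc holds the length of
	 * the retained PSK. */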
2659 	rc = nvme_tcp_derive_tls_psk(psk_retained, rc, tctrlr->psk_identity, tctrlr->psk,
2660 				     sizeof(tctrlr->psk), tls_cipher_suite);
2661 	if (rc < 0) {
2662 		SPDK_ERRLOG("Could not generate TLS PSK!\n");
2663 		goto finish;
2664 	}
2665 
2666 	tctrlr->psk_size = rc;
2667 	rc = 0;
2668 finish:
2669 	spdk_memset_s(psk_configured, sizeof(psk_configured), 0, sizeof(psk_configured));
2670 	spdk_memset_s(pskbuf, sizeof(pskbuf), 0, sizeof(pskbuf));
2671 
2672 	return rc;
2673 }
2674 
2675 /* We have to use the typedef in the function declaration to appease astyle. */
2676 typedef struct spdk_nvme_ctrlr spdk_nvme_ctrlr_t;
2677 
2678 static spdk_nvme_ctrlr_t *
2679 nvme_tcp_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
2680 			 const struct spdk_nvme_ctrlr_opts *opts,
2681 			 void *devhandle)
2682 {
2683 	struct nvme_tcp_ctrlr *tctrlr;
2684 	struct nvme_tcp_qpair *tqpair;
2685 	int rc;
2686 
2687 	tctrlr = calloc(1, sizeof(*tctrlr));
2688 	if (tctrlr == NULL) {
2689 		SPDK_ERRLOG("could not allocate ctrlr\n");
2690 		return NULL;
2691 	}
2692 
2693 	tctrlr->ctrlr.opts = *opts;
2694 	tctrlr->ctrlr.trid = *trid;
2695 
2696 	if (opts->tls_psk != NULL) {
2697 		rc = nvme_tcp_generate_tls_credentials(tctrlr);
2698 		if (rc != 0) {
2699 			free(tctrlr);
2700 			return NULL;
2701 		}
2702 	}
2703 
2704 	if (opts->transport_ack_timeout > NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT) {
2705 		SPDK_NOTICELOG("transport_ack_timeout exceeds max value %d, use max value\n",
2706 			       NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
2707 		tctrlr->ctrlr.opts.transport_ack_timeout = NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT;
2708 	}
2709 
2710 	rc = nvme_ctrlr_construct(&tctrlr->ctrlr);
2711 	if (rc != 0) {
2712 		free(tctrlr);
2713 		return NULL;
2714 	}
2715 
2716 	/* An accel sequence may be used not only for data digest offload, but also
2717 	 * to handle a potential COPY operation appended as a result of translation. */
2718 	tctrlr->ctrlr.flags |= SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED;
2719 	tctrlr->ctrlr.adminq = nvme_tcp_ctrlr_create_qpair(&tctrlr->ctrlr, 0,
2720 			       tctrlr->ctrlr.opts.admin_queue_size, 0,
2721 			       tctrlr->ctrlr.opts.admin_queue_size, true);
2722 	if (!tctrlr->ctrlr.adminq) {
2723 		SPDK_ERRLOG("failed to create admin qpair\n");
2724 		nvme_tcp_ctrlr_destruct(&tctrlr->ctrlr);
2725 		return NULL;
2726 	}
2727 
2728 	tqpair = nvme_tcp_qpair(tctrlr->ctrlr.adminq);
2729 	tctrlr->ctrlr.numa.id_valid = 1;
2730 	tctrlr->ctrlr.numa.id = spdk_sock_get_numa_id(tqpair->sock);
2731 
2732 	if (nvme_ctrlr_add_process(&tctrlr->ctrlr, 0) != 0) {
2733 		SPDK_ERRLOG("nvme_ctrlr_add_process() failed\n");
2734 		nvme_ctrlr_destruct(&tctrlr->ctrlr);
2735 		return NULL;
2736 	}
2737 
2738 	return &tctrlr->ctrlr;
2739 }
2740 
2741 static uint32_t
2742 nvme_tcp_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
2743 {
2744 	/* TCP transport doesn't limit maximum IO transfer size. */
2745 	return UINT32_MAX;
2746 }
2747 
2748 static uint16_t
2749 nvme_tcp_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
2750 {
2751 	return NVME_TCP_MAX_SGL_DESCRIPTORS;
2752 }
2753 
2754 static int
2755 nvme_tcp_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
2756 				int (*iter_fn)(struct nvme_request *req, void *arg),
2757 				void *arg)
2758 {
2759 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2760 	struct nvme_tcp_req *tcp_req, *tmp;
2761 	int rc;
2762 
2763 	assert(iter_fn != NULL);
2764 
2765 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
2766 		assert(tcp_req->req != NULL);
2767 
2768 		rc = iter_fn(tcp_req->req, arg);
2769 		if (rc != 0) {
2770 			return rc;
2771 		}
2772 	}
2773 
2774 	return 0;
2775 }
2776 
2777 static int
2778 nvme_tcp_qpair_authenticate(struct spdk_nvme_qpair *qpair)
2779 {
2780 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2781 	int rc;
2782 
2783 	/* If the qpair is still connecting, it'll be forced to authenticate later on */
2784 	if (tqpair->state < NVME_TCP_QPAIR_STATE_RUNNING) {
2785 		return 0;
2786 	} else if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
2787 		return -ENOTCONN;
2788 	}
2789 
2790 	rc = nvme_fabric_qpair_authenticate_async(qpair);
2791 	if (rc == 0) {
2792 		nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
2793 		tqpair->state = NVME_TCP_QPAIR_STATE_AUTHENTICATING;
2794 	}
2795 
2796 	return rc;
2797 }
2798 
2799 static void
2800 nvme_tcp_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
2801 {
2802 	struct nvme_tcp_req *tcp_req, *tmp;
2803 	struct spdk_nvme_cpl cpl = {};
2804 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2805 
2806 	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
2807 	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2808 
2809 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
2810 		assert(tcp_req->req != NULL);
2811 		if (tcp_req->req->cmd.opc != SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
2812 			continue;
2813 		}
2814 
2815 		nvme_tcp_req_complete(tcp_req, tqpair, &cpl, false);
2816 	}
2817 }
2818 
2819 static struct spdk_nvme_transport_poll_group *
2820 nvme_tcp_poll_group_create(void)
2821 {
2822 	struct nvme_tcp_poll_group *group = calloc(1, sizeof(*group));
2823 
2824 	if (group == NULL) {
2825 		SPDK_ERRLOG("Unable to allocate poll group.\n");
2826 		return NULL;
2827 	}
2828 
2829 	TAILQ_INIT(&group->needs_poll);
2830 
2831 	group->sock_group = spdk_sock_group_create(group);
2832 	if (group->sock_group == NULL) {
2833 		free(group);
2834 		SPDK_ERRLOG("Unable to allocate sock group.\n");
2835 		return NULL;
2836 	}
2837 
2838 	return &group->group;
2839 }
2840 
2841 static struct spdk_nvme_transport_poll_group *
2842 nvme_tcp_qpair_get_optimal_poll_group(struct spdk_nvme_qpair *qpair)
2843 {
2844 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2845 	struct spdk_sock_group *group = NULL;
2846 	int rc;
2847 
2848 	rc = spdk_sock_get_optimal_sock_group(tqpair->sock, &group, NULL);
2849 	if (!rc && group != NULL) {
2850 		return spdk_sock_group_get_ctx(group);
2851 	}
2852 
2853 	return NULL;
2854 }
2855 
2856 static int
2857 nvme_tcp_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
2858 {
2859 	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(qpair->poll_group);
2860 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2861 
2862 	if (spdk_sock_group_add_sock(group->sock_group, tqpair->sock, nvme_tcp_qpair_sock_cb, qpair)) {
2863 		return -EPROTO;
2864 	}
2865 	return 0;
2866 }
2867 
2868 static int
2869 nvme_tcp_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
2870 {
2871 	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(qpair->poll_group);
2872 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2873 
2874 	if (tqpair->needs_poll) {
2875 		TAILQ_REMOVE(&group->needs_poll, tqpair, link);
2876 		tqpair->needs_poll = false;
2877 	}
2878 
2879 	if (tqpair->sock && group->sock_group) {
2880 		if (spdk_sock_group_remove_sock(group->sock_group, tqpair->sock)) {
2881 			return -EPROTO;
2882 		}
2883 	}
2884 	return 0;
2885 }
2886 
2887 static int
2888 nvme_tcp_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
2889 			struct spdk_nvme_qpair *qpair)
2890 {
2891 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
2892 	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(tgroup);
2893 
2894 	/* disconnected qpairs won't have a sock to add. */
2895 	if (nvme_qpair_get_state(qpair) >= NVME_QPAIR_CONNECTED) {
2896 		if (spdk_sock_group_add_sock(group->sock_group, tqpair->sock, nvme_tcp_qpair_sock_cb, qpair)) {
2897 			return -EPROTO;
2898 		}
2899 	}
2900 
2901 	return 0;
2902 }
2903 
2904 static int
2905 nvme_tcp_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
2906 			   struct spdk_nvme_qpair *qpair)
2907 {
2908 	struct nvme_tcp_qpair *tqpair;
2909 	struct nvme_tcp_poll_group *group;
2910 
2911 	assert(qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs);
2912 
2913 	tqpair = nvme_tcp_qpair(qpair);
2914 	group = nvme_tcp_poll_group(tgroup);
2915 
2916 	assert(tqpair->shared_stats == true);
2917 	tqpair->stats = &g_dummy_stats;
2918 
2919 	if (tqpair->needs_poll) {
2920 		TAILQ_REMOVE(&group->needs_poll, tqpair, link);
2921 		tqpair->needs_poll = false;
2922 	}
2923 
2924 	return 0;
2925 }
2926 
2927 static int64_t
2928 nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
2929 					uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
2930 {
2931 	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(tgroup);
2932 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
2933 	struct nvme_tcp_qpair *tqpair, *tmp_tqpair;
2934 	int num_events;
2935 
2936 	group->completions_per_qpair = completions_per_qpair;
2937 	group->num_completions = 0;
2938 	group->stats.polls++;
2939 
2940 	num_events = spdk_sock_group_poll(group->sock_group);
2941 
2942 	STAILQ_FOREACH_SAFE(qpair, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_qpair) {
2943 		tqpair = nvme_tcp_qpair(qpair);
2944 		if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
2945 			if (TAILQ_EMPTY(&tqpair->outstanding_reqs)) {
2946 				nvme_transport_ctrlr_disconnect_qpair_done(qpair);
2947 			}
2948 		}
2949 		/* Wait until the qpair transitions to the DISCONNECTED state, otherwise the user might
2950 		 * free it from disconnected_qpair_cb while it's not fully disconnected (and
2951 		 * might still have outstanding requests). */
2952 		if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
2953 			disconnected_qpair_cb(qpair, tgroup->group->ctx);
2954 		}
2955 	}
2956 
2957 	/* If any qpairs were marked as needing to be polled due to an asynchronous write completion
2958 	 * and they weren't polled as a consequence of calling spdk_sock_group_poll above, poll them now. */
2959 	TAILQ_FOREACH_SAFE(tqpair, &group->needs_poll, link, tmp_tqpair) {
2960 		nvme_tcp_qpair_sock_cb(&tqpair->qpair, group->sock_group, tqpair->sock);
2961 	}
2962 
2963 	if (spdk_unlikely(num_events < 0)) {
2964 		return num_events;
2965 	}
2966 
2967 	group->stats.idle_polls += !num_events;
2968 	group->stats.socket_completions += num_events;
2969 
2970 	return group->num_completions;
2971 }
2972 
2973 static int
2974 nvme_tcp_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
2975 {
2976 	int rc;
2977 	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(tgroup);
2978 
2979 	if (!STAILQ_EMPTY(&tgroup->connected_qpairs) || !STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
2980 		return -EBUSY;
2981 	}
2982 
2983 	rc = spdk_sock_group_close(&group->sock_group);
2984 	if (rc != 0) {
2985 		SPDK_ERRLOG("Failed to close the sock group for a tcp poll group.\n");
2986 		assert(false);
2987 	}
2988 
2989 	free(tgroup);
2990 
2991 	return 0;
2992 }
2993 
2994 static int
2995 nvme_tcp_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
2996 			      struct spdk_nvme_transport_poll_group_stat **_stats)
2997 {
2998 	struct nvme_tcp_poll_group *group;
2999 	struct spdk_nvme_transport_poll_group_stat *stats;
3000 
3001 	if (tgroup == NULL || _stats == NULL) {
3002 		SPDK_ERRLOG("Invalid stats or group pointer\n");
3003 		return -EINVAL;
3004 	}
3005 
3006 	group = nvme_tcp_poll_group(tgroup);
3007 
3008 	stats = calloc(1, sizeof(*stats));
3009 	if (!stats) {
3010 		SPDK_ERRLOG("Can't allocate memory for TCP stats\n");
3011 		return -ENOMEM;
3012 	}
3013 	stats->trtype = SPDK_NVME_TRANSPORT_TCP;
3014 	memcpy(&stats->tcp, &group->stats, sizeof(group->stats));
3015 
3016 	*_stats = stats;
3017 
3018 	return 0;
3019 }
3020 
3021 static void
3022 nvme_tcp_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
3023 			       struct spdk_nvme_transport_poll_group_stat *stats)
3024 {
3025 	free(stats);
3026 }
3027 
3028 static int
3029 nvme_tcp_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
3030 				  struct spdk_memory_domain **domains, int array_size)
3031 {
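	/* Only the system memory domain is used by the TCP transport; always report exactly one
	 * domain, regardless of array_size. */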
3032 	if (domains && array_size > 0) {
3033 		domains[0] = spdk_memory_domain_get_system_domain();
3034 	}
3035 
3036 	return 1;
3037 }
3038 
3039 const struct spdk_nvme_transport_ops tcp_ops = {
3040 	.name = "TCP",
3041 	.type = SPDK_NVME_TRANSPORT_TCP,
3042 	.ctrlr_construct = nvme_tcp_ctrlr_construct,
3043 	.ctrlr_scan = nvme_fabric_ctrlr_scan,
3044 	.ctrlr_destruct = nvme_tcp_ctrlr_destruct,
3045 	.ctrlr_enable = nvme_tcp_ctrlr_enable,
3046 
3047 	.ctrlr_set_reg_4 = nvme_fabric_ctrlr_set_reg_4,
3048 	.ctrlr_set_reg_8 = nvme_fabric_ctrlr_set_reg_8,
3049 	.ctrlr_get_reg_4 = nvme_fabric_ctrlr_get_reg_4,
3050 	.ctrlr_get_reg_8 = nvme_fabric_ctrlr_get_reg_8,
3051 	.ctrlr_set_reg_4_async = nvme_fabric_ctrlr_set_reg_4_async,
3052 	.ctrlr_set_reg_8_async = nvme_fabric_ctrlr_set_reg_8_async,
3053 	.ctrlr_get_reg_4_async = nvme_fabric_ctrlr_get_reg_4_async,
3054 	.ctrlr_get_reg_8_async = nvme_fabric_ctrlr_get_reg_8_async,
3055 
3056 	.ctrlr_get_max_xfer_size = nvme_tcp_ctrlr_get_max_xfer_size,
3057 	.ctrlr_get_max_sges = nvme_tcp_ctrlr_get_max_sges,
3058 
3059 	.ctrlr_create_io_qpair = nvme_tcp_ctrlr_create_io_qpair,
3060 	.ctrlr_delete_io_qpair = nvme_tcp_ctrlr_delete_io_qpair,
3061 	.ctrlr_connect_qpair = nvme_tcp_ctrlr_connect_qpair,
3062 	.ctrlr_disconnect_qpair = nvme_tcp_ctrlr_disconnect_qpair,
3063 
3064 	.ctrlr_get_memory_domains = nvme_tcp_ctrlr_get_memory_domains,
3065 
3066 	.qpair_abort_reqs = nvme_tcp_qpair_abort_reqs,
3067 	.qpair_reset = nvme_tcp_qpair_reset,
3068 	.qpair_submit_request = nvme_tcp_qpair_submit_request,
3069 	.qpair_process_completions = nvme_tcp_qpair_process_completions,
3070 	.qpair_iterate_requests = nvme_tcp_qpair_iterate_requests,
3071 	.qpair_authenticate = nvme_tcp_qpair_authenticate,
3072 	.admin_qpair_abort_aers = nvme_tcp_admin_qpair_abort_aers,
3073 
3074 	.poll_group_create = nvme_tcp_poll_group_create,
3075 	.qpair_get_optimal_poll_group = nvme_tcp_qpair_get_optimal_poll_group,
3076 	.poll_group_connect_qpair = nvme_tcp_poll_group_connect_qpair,
3077 	.poll_group_disconnect_qpair = nvme_tcp_poll_group_disconnect_qpair,
3078 	.poll_group_add = nvme_tcp_poll_group_add,
3079 	.poll_group_remove = nvme_tcp_poll_group_remove,
3080 	.poll_group_process_completions = nvme_tcp_poll_group_process_completions,
3081 	.poll_group_destroy = nvme_tcp_poll_group_destroy,
3082 	.poll_group_get_stats = nvme_tcp_poll_group_get_stats,
3083 	.poll_group_free_stats = nvme_tcp_poll_group_free_stats,
3084 };
3085 
3086 SPDK_NVME_TRANSPORT_REGISTER(tcp, &tcp_ops);
3087 
3088 static void
3089 nvme_tcp_trace(void)
3090 {
3091 	struct spdk_trace_tpoint_opts opts[] = {
3092 		{
3093 			"NVME_TCP_SUBMIT", TRACE_NVME_TCP_SUBMIT,
3094 			OWNER_TYPE_NVME_TCP_QP, OBJECT_NVME_TCP_REQ, 1,
3095 			{	{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
3096 				{ "cid", SPDK_TRACE_ARG_TYPE_INT, 4 },
3097 				{ "opc", SPDK_TRACE_ARG_TYPE_INT, 4 },
3098 				{ "dw10", SPDK_TRACE_ARG_TYPE_PTR, 4 },
3099 				{ "dw11", SPDK_TRACE_ARG_TYPE_PTR, 4 },
3100 				{ "dw12", SPDK_TRACE_ARG_TYPE_PTR, 4 },
3101 				{ "qd", SPDK_TRACE_ARG_TYPE_INT, 4 }
3102 			}
3103 		},
3104 		{
3105 			"NVME_TCP_COMPLETE", TRACE_NVME_TCP_COMPLETE,
3106 			OWNER_TYPE_NVME_TCP_QP, OBJECT_NVME_TCP_REQ, 0,
3107 			{	{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
3108 				{ "cid", SPDK_TRACE_ARG_TYPE_INT, 4 },
3109 				{ "cpl", SPDK_TRACE_ARG_TYPE_PTR, 4 },
3110 				{ "qd", SPDK_TRACE_ARG_TYPE_INT, 4 }
3111 			}
3112 		},
3113 	};
3114 
3115 	spdk_trace_register_object(OBJECT_NVME_TCP_REQ, 'p');
3116 	spdk_trace_register_owner_type(OWNER_TYPE_NVME_TCP_QP, 'q');
3117 	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
3118 
3119 	spdk_trace_tpoint_register_relation(TRACE_SOCK_REQ_QUEUE, OBJECT_NVME_TCP_REQ, 0);
3120 	spdk_trace_tpoint_register_relation(TRACE_SOCK_REQ_PEND, OBJECT_NVME_TCP_REQ, 0);
3121 	spdk_trace_tpoint_register_relation(TRACE_SOCK_REQ_COMPLETE, OBJECT_NVME_TCP_REQ, 0);
3122 }
3123 SPDK_TRACE_REGISTER_FN(nvme_tcp_trace, "nvme_tcp", TRACE_GROUP_NVME_TCP)
3124