xref: /spdk/lib/nvmf/tcp.c (revision df902b1d2e0abbbdeb84c0972bad34d250227e26)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/accel_engine.h"
35 #include "spdk/stdinc.h"
36 #include "spdk/crc32.h"
37 #include "spdk/endian.h"
38 #include "spdk/assert.h"
39 #include "spdk/thread.h"
40 #include "spdk/nvmf_transport.h"
41 #include "spdk/string.h"
42 #include "spdk/trace.h"
43 #include "spdk/util.h"
44 #include "spdk/log.h"
45 
46 #include "spdk_internal/assert.h"
47 #include "spdk_internal/nvme_tcp.h"
48 #include "spdk_internal/sock.h"
49 
50 #include "nvmf_internal.h"
51 
52 #define NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME 16
53 #define SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY 16
54 #define SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY 0
55 #define SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM 32
56 #define SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION true
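/*
 * Note: "success optimization" here refers to the NVMe/TCP feature where the
 * last C2H Data PDU of a read carries the SUCCESS flag, allowing the target to
 * skip the separate response capsule. See the c2h_success transport option and
 * nvmf_tcp_pdu_c2h_data_complete() below.
 */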
57 
58 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp;
59 
60 /* States of the NVMe/TCP target request state machine */
61 enum spdk_nvmf_tcp_req_state {
62 
63 	/* The request is not currently in use */
64 	TCP_REQUEST_STATE_FREE = 0,
65 
66 	/* Initial state when a request is first received */
67 	TCP_REQUEST_STATE_NEW,
68 
69 	/* The request is queued until a data buffer is available. */
70 	TCP_REQUEST_STATE_NEED_BUFFER,
71 
72 	/* The request is currently transferring data from the host to the controller. */
73 	TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
74 
75 	/* The request is waiting for the R2T send acknowledgement. */
76 	TCP_REQUEST_STATE_AWAITING_R2T_ACK,
77 
78 	/* The request is ready to execute at the block device */
79 	TCP_REQUEST_STATE_READY_TO_EXECUTE,
80 
81 	/* The request is currently executing at the block device */
82 	TCP_REQUEST_STATE_EXECUTING,
83 
84 	/* The request finished executing at the block device */
85 	TCP_REQUEST_STATE_EXECUTED,
86 
87 	/* The request is ready to send a completion */
88 	TCP_REQUEST_STATE_READY_TO_COMPLETE,
89 
90 	/* The request is currently transferring final pdus from the controller to the host. */
91 	TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
92 
93 	/* The request completed and can be marked free. */
94 	TCP_REQUEST_STATE_COMPLETED,
95 
96 	/* Terminator */
97 	TCP_REQUEST_NUM_STATES,
98 };
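/*
 * For orientation, a sketch (not new logic) of the usual state flow for a
 * write command that needs an R2T, as driven by nvmf_tcp_req_process():
 *
 *   FREE -> NEW -> NEED_BUFFER -> AWAITING_R2T_ACK ->
 *   TRANSFERRING_HOST_TO_CONTROLLER -> READY_TO_EXECUTE -> EXECUTING ->
 *   EXECUTED -> READY_TO_COMPLETE -> TRANSFERRING_CONTROLLER_TO_HOST ->
 *   COMPLETED -> FREE
 *
 * Reads and in-capsule writes skip the R2T/H2C transfer states.
 */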
99 
100 static const char *spdk_nvmf_tcp_term_req_fes_str[] = {
101 	"Invalid PDU Header Field",
102 	"PDU Sequence Error",
103 	"Header Digiest Error",
104 	"Data Transfer Out of Range",
105 	"R2T Limit Exceeded",
106 	"Unsupported parameter",
107 };
108 
109 #define OBJECT_NVMF_TCP_IO				0x80
110 
111 #define TRACE_GROUP_NVMF_TCP				0x5
112 #define TRACE_TCP_REQUEST_STATE_NEW					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x0)
113 #define TRACE_TCP_REQUEST_STATE_NEED_BUFFER				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x1)
114 #define TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x2)
115 #define TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE			SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x3)
116 #define TRACE_TCP_REQUEST_STATE_EXECUTING				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x4)
117 #define TRACE_TCP_REQUEST_STATE_EXECUTED				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x5)
118 #define TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE			SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x6)
119 #define TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x7)
120 #define TRACE_TCP_REQUEST_STATE_COMPLETED				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x8)
121 #define TRACE_TCP_FLUSH_WRITEBUF_START					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x9)
122 #define TRACE_TCP_FLUSH_WRITEBUF_DONE					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0xA)
123 #define TRACE_TCP_READ_FROM_SOCKET_DONE					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0xB)
124 #define TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0xC)
125 
126 SPDK_TRACE_REGISTER_FN(nvmf_tcp_trace, "nvmf_tcp", TRACE_GROUP_NVMF_TCP)
127 {
128 	spdk_trace_register_object(OBJECT_NVMF_TCP_IO, 'r');
129 	spdk_trace_register_description("TCP_REQ_NEW",
130 					TRACE_TCP_REQUEST_STATE_NEW,
131 					OWNER_NONE, OBJECT_NVMF_TCP_IO, 1, 1, "");
132 	spdk_trace_register_description("TCP_REQ_NEED_BUFFER",
133 					TRACE_TCP_REQUEST_STATE_NEED_BUFFER,
134 					OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, "");
135 	spdk_trace_register_description("TCP_REQ_TX_H_TO_C",
136 					TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
137 					OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, "");
138 	spdk_trace_register_description("TCP_REQ_RDY_TO_EXECUTE",
139 					TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE,
140 					OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, "");
141 	spdk_trace_register_description("TCP_REQ_EXECUTING",
142 					TRACE_TCP_REQUEST_STATE_EXECUTING,
143 					OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, "");
144 	spdk_trace_register_description("TCP_REQ_EXECUTED",
145 					TRACE_TCP_REQUEST_STATE_EXECUTED,
146 					OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, "");
147 	spdk_trace_register_description("TCP_REQ_RDY_TO_COMPLETE",
148 					TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE,
149 					OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, "");
150 	spdk_trace_register_description("TCP_REQ_TRANSFER_C2H",
151 					TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
152 					OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, "");
153 	spdk_trace_register_description("TCP_REQ_COMPLETED",
154 					TRACE_TCP_REQUEST_STATE_COMPLETED,
155 					OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, "");
156 	spdk_trace_register_description("TCP_WRITE_START",
157 					TRACE_TCP_FLUSH_WRITEBUF_START,
158 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
159 	spdk_trace_register_description("TCP_WRITE_DONE",
160 					TRACE_TCP_FLUSH_WRITEBUF_DONE,
161 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
162 	spdk_trace_register_description("TCP_READ_DONE",
163 					TRACE_TCP_READ_FROM_SOCKET_DONE,
164 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
165 	spdk_trace_register_description("TCP_REQ_AWAIT_R2T_ACK",
166 					TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK,
167 					OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, "");
168 }
169 
170 struct spdk_nvmf_tcp_req  {
171 	struct spdk_nvmf_request		req;
172 	struct spdk_nvme_cpl			rsp;
173 	struct spdk_nvme_cmd			cmd;
174 
175 	/* A PDU that can be used for sending responses. This is
176 	 * not the incoming PDU! */
177 	struct nvme_tcp_pdu			*pdu;
178 
179 	/* In-capsule data buffer */
180 	uint8_t					*buf;
181 	/*
182 	 * The PDU for a request may be used multiple times in serial over
183 	 * the request's lifetime. For example, first to send an R2T, then
184 	 * to send a completion. To catch mistakes where the PDU is used
185 	 * twice at the same time, add a debug flag here for init/fini.
186 	 */
187 	bool					pdu_in_use;
188 	bool					has_incapsule_data;
189 
190 	/* transfer_tag */
191 	uint16_t				ttag;
192 
193 	enum spdk_nvmf_tcp_req_state		state;
194 
195 	/*
196 	 * h2c_offset tracks the next expected data offset while receiving h2c_data PDUs.
197 	 */
198 	uint32_t				h2c_offset;
199 
200 	STAILQ_ENTRY(spdk_nvmf_tcp_req)		link;
201 	TAILQ_ENTRY(spdk_nvmf_tcp_req)		state_link;
202 };
203 
204 struct spdk_nvmf_tcp_qpair {
205 	struct spdk_nvmf_qpair			qpair;
206 	struct spdk_nvmf_tcp_poll_group		*group;
207 	struct spdk_sock			*sock;
208 
209 	enum nvme_tcp_pdu_recv_state		recv_state;
210 	enum nvme_tcp_qpair_state		state;
211 
212 	/* PDU being actively received */
213 	struct nvme_tcp_pdu			pdu_in_progress;
214 
215 	/* Queues to track the requests in all states */
216 	TAILQ_HEAD(, spdk_nvmf_tcp_req)		tcp_req_working_queue;
217 	TAILQ_HEAD(, spdk_nvmf_tcp_req)		tcp_req_free_queue;
218 
219 	/* Number of requests in each state */
220 	uint32_t				state_cntr[TCP_REQUEST_NUM_STATES];
221 
222 	uint8_t					cpda;
223 
224 	bool					host_hdgst_enable;
225 	bool					host_ddgst_enable;
226 
227 	/* This is a spare PDU used for sending special management
228 	 * operations. Primarily, this is used for the initial
229 	 * connection response and c2h termination request. */
230 	struct nvme_tcp_pdu			*mgmt_pdu;
231 
232 	/* Arrays of in-capsule buffers, requests, and pdus.
233 	 * Each array contains 'resource_count' elements */
234 	void					*bufs;
235 	struct spdk_nvmf_tcp_req		*reqs;
236 	struct nvme_tcp_pdu			*pdus;
237 	uint32_t				resource_count;
238 	uint32_t				recv_buf_size;
239 
240 	struct spdk_nvmf_tcp_port		*port;
241 
242 	/* IP address */
243 	char					initiator_addr[SPDK_NVMF_TRADDR_MAX_LEN];
244 	char					target_addr[SPDK_NVMF_TRADDR_MAX_LEN];
245 
246 	/* IP port */
247 	uint16_t				initiator_port;
248 	uint16_t				target_port;
249 
250 	/* Timer used to destroy the qpair if the initiator does not close the
251 	 *  connection after a transport error has been detected.
252 	 */
253 	struct spdk_poller			*timeout_poller;
254 
255 
256 	TAILQ_ENTRY(spdk_nvmf_tcp_qpair)	link;
257 };
258 
259 struct spdk_nvmf_tcp_control_msg {
260 	STAILQ_ENTRY(spdk_nvmf_tcp_control_msg) link;
261 };
262 
263 struct spdk_nvmf_tcp_control_msg_list {
264 	void *msg_buf;
265 	STAILQ_HEAD(, spdk_nvmf_tcp_control_msg) free_msgs;
266 };
267 
268 struct spdk_nvmf_tcp_poll_group {
269 	struct spdk_nvmf_transport_poll_group	group;
270 	struct spdk_sock_group			*sock_group;
271 
272 	TAILQ_HEAD(, spdk_nvmf_tcp_qpair)	qpairs;
273 	TAILQ_HEAD(, spdk_nvmf_tcp_qpair)	await_req;
274 
275 	struct spdk_io_channel			*accel_channel;
276 	struct spdk_nvmf_tcp_control_msg_list	*control_msg_list;
277 };
278 
279 struct spdk_nvmf_tcp_port {
280 	const struct spdk_nvme_transport_id	*trid;
281 	struct spdk_sock			*listen_sock;
282 	TAILQ_ENTRY(spdk_nvmf_tcp_port)		link;
283 };
284 
285 struct tcp_transport_opts {
286 	bool		c2h_success;
287 	uint16_t	control_msg_num;
288 	uint32_t	sock_priority;
289 };
290 
291 struct spdk_nvmf_tcp_transport {
292 	struct spdk_nvmf_transport		transport;
293 	struct tcp_transport_opts               tcp_opts;
294 
295 	pthread_mutex_t				lock;
296 
297 	TAILQ_HEAD(, spdk_nvmf_tcp_port)	ports;
298 };
299 
300 static const struct spdk_json_object_decoder tcp_transport_opts_decoder[] = {
301 	{
302 		"c2h_success", offsetof(struct tcp_transport_opts, c2h_success),
303 		spdk_json_decode_bool, true
304 	},
305 	{
306 		"control_msg_num", offsetof(struct tcp_transport_opts, control_msg_num),
307 		spdk_json_decode_uint16, true
308 	},
309 	{
310 		"sock_priority", offsetof(struct tcp_transport_opts, sock_priority),
311 		spdk_json_decode_uint32, true
312 	},
313 };
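/*
 * The decoder above handles the transport-specific JSON given at transport
 * creation. A hypothetical payload exercising every key (values shown are the
 * defaults) would look like:
 *
 *   { "c2h_success": true, "control_msg_num": 32, "sock_priority": 0 }
 */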
314 
315 static bool nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
316 				 struct spdk_nvmf_tcp_req *tcp_req);
317 static void nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group);
318 
319 static void _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
320 				    struct spdk_nvmf_tcp_req *tcp_req);
321 
322 static void
323 nvmf_tcp_req_set_state(struct spdk_nvmf_tcp_req *tcp_req,
324 		       enum spdk_nvmf_tcp_req_state state)
325 {
326 	struct spdk_nvmf_qpair *qpair;
327 	struct spdk_nvmf_tcp_qpair *tqpair;
328 
329 	qpair = tcp_req->req.qpair;
330 	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
331 
332 	assert(tqpair->state_cntr[tcp_req->state] > 0);
333 	tqpair->state_cntr[tcp_req->state]--;
334 	tqpair->state_cntr[state]++;
335 
336 	tcp_req->state = state;
337 }
338 
339 static inline struct nvme_tcp_pdu *
340 nvmf_tcp_req_pdu_init(struct spdk_nvmf_tcp_req *tcp_req)
341 {
342 	assert(tcp_req->pdu_in_use == false);
343 	tcp_req->pdu_in_use = true;
344 
345 	memset(tcp_req->pdu, 0, sizeof(*tcp_req->pdu));
346 	tcp_req->pdu->qpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
347 
348 	return tcp_req->pdu;
349 }
350 
351 static inline void
352 nvmf_tcp_req_pdu_fini(struct spdk_nvmf_tcp_req *tcp_req)
353 {
354 	tcp_req->pdu_in_use = false;
355 }
356 
357 static struct spdk_nvmf_tcp_req *
358 nvmf_tcp_req_get(struct spdk_nvmf_tcp_qpair *tqpair)
359 {
360 	struct spdk_nvmf_tcp_req *tcp_req;
361 
362 	tcp_req = TAILQ_FIRST(&tqpair->tcp_req_free_queue);
363 	if (!tcp_req) {
364 		return NULL;
365 	}
366 
367 	memset(&tcp_req->rsp, 0, sizeof(tcp_req->rsp));
368 	tcp_req->h2c_offset = 0;
369 	tcp_req->has_incapsule_data = false;
370 	tcp_req->req.dif.dif_insert_or_strip = false;
371 
372 	TAILQ_REMOVE(&tqpair->tcp_req_free_queue, tcp_req, state_link);
373 	TAILQ_INSERT_TAIL(&tqpair->tcp_req_working_queue, tcp_req, state_link);
374 	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEW);
375 	return tcp_req;
376 }
377 
378 static inline void
379 nvmf_tcp_req_put(struct spdk_nvmf_tcp_qpair *tqpair, struct spdk_nvmf_tcp_req *tcp_req)
380 {
381 	TAILQ_REMOVE(&tqpair->tcp_req_working_queue, tcp_req, state_link);
382 	TAILQ_INSERT_TAIL(&tqpair->tcp_req_free_queue, tcp_req, state_link);
383 	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_FREE);
384 }
385 
386 static void
387 nvmf_tcp_request_free(void *cb_arg)
388 {
389 	struct spdk_nvmf_tcp_transport *ttransport;
390 	struct spdk_nvmf_tcp_req *tcp_req = cb_arg;
391 
392 	assert(tcp_req != NULL);
393 
394 	SPDK_DEBUGLOG(nvmf_tcp, "tcp_req=%p will be freed\n", tcp_req);
395 	ttransport = SPDK_CONTAINEROF(tcp_req->req.qpair->transport,
396 				      struct spdk_nvmf_tcp_transport, transport);
397 	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED);
398 	nvmf_tcp_req_process(ttransport, tcp_req);
399 }
400 
401 static int
402 nvmf_tcp_req_free(struct spdk_nvmf_request *req)
403 {
404 	struct spdk_nvmf_tcp_req *tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req);
405 
406 	nvmf_tcp_request_free(tcp_req);
407 
408 	return 0;
409 }
410 
411 static void
412 nvmf_tcp_drain_state_queue(struct spdk_nvmf_tcp_qpair *tqpair,
413 			   enum spdk_nvmf_tcp_req_state state)
414 {
415 	struct spdk_nvmf_tcp_req *tcp_req, *req_tmp;
416 
417 	assert(state != TCP_REQUEST_STATE_FREE);
418 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->tcp_req_working_queue, state_link, req_tmp) {
419 		if (state == tcp_req->state) {
420 			nvmf_tcp_request_free(tcp_req);
421 		}
422 	}
423 }
424 
425 static void
426 nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair)
427 {
428 	struct spdk_nvmf_tcp_req *tcp_req, *req_tmp;
429 
430 	nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
431 	nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEW);
432 
433 	/* Remove requests still waiting for a buffer from the poll group's pending queue */
434 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->tcp_req_working_queue, state_link, req_tmp) {
435 		if (tcp_req->state == TCP_REQUEST_STATE_NEED_BUFFER) {
436 			STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue, &tcp_req->req,
437 				      spdk_nvmf_request, buf_link);
438 		}
439 	}
440 
441 	nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER);
442 	nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_EXECUTING);
443 	nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
444 	nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_AWAITING_R2T_ACK);
445 }
446 
447 static void
448 nvmf_tcp_dump_qpair_req_contents(struct spdk_nvmf_tcp_qpair *tqpair)
449 {
450 	int i;
451 	struct spdk_nvmf_tcp_req *tcp_req;
452 
453 	SPDK_ERRLOG("Dumping contents of queue pair (QID %d)\n", tqpair->qpair.qid);
454 	for (i = 1; i < TCP_REQUEST_NUM_STATES; i++) {
455 		SPDK_ERRLOG("\tNum of requests in state[%d] = %u\n", i, tqpair->state_cntr[i]);
456 		TAILQ_FOREACH(tcp_req, &tqpair->tcp_req_working_queue, state_link) {
457 			if ((int)tcp_req->state == i) {
458 				SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", tcp_req->req.data_from_pool);
459 				SPDK_ERRLOG("\t\tRequest opcode: %d\n", tcp_req->req.cmd->nvmf_cmd.opcode);
460 			}
461 		}
462 	}
463 }
464 
465 static void
466 nvmf_tcp_qpair_destroy(struct spdk_nvmf_tcp_qpair *tqpair)
467 {
468 	int err = 0;
469 
470 	SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
471 
472 	err = spdk_sock_close(&tqpair->sock);
473 	assert(err == 0);
474 	nvmf_tcp_cleanup_all_states(tqpair);
475 
476 	if (tqpair->state_cntr[TCP_REQUEST_STATE_FREE] != tqpair->resource_count) {
477 		SPDK_ERRLOG("tqpair(%p) free tcp request num is %u but should be %u\n", tqpair,
478 			    tqpair->state_cntr[TCP_REQUEST_STATE_FREE],
479 			    tqpair->resource_count);
480 		err++;
481 	}
482 
483 	if (err > 0) {
484 		nvmf_tcp_dump_qpair_req_contents(tqpair);
485 	}
486 
487 	spdk_dma_free(tqpair->pdus);
488 	free(tqpair->reqs);
489 	spdk_free(tqpair->bufs);
490 	free(tqpair);
491 	SPDK_DEBUGLOG(nvmf_tcp, "Leave\n");
492 }
493 
494 static void
495 nvmf_tcp_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w)
496 {
497 	struct spdk_nvmf_tcp_transport	*ttransport;
498 	assert(w != NULL);
499 
500 	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
501 	spdk_json_write_named_bool(w, "c2h_success", ttransport->tcp_opts.c2h_success);
502 	spdk_json_write_named_uint32(w, "sock_priority", ttransport->tcp_opts.sock_priority);
503 }
504 
505 static int
506 nvmf_tcp_destroy(struct spdk_nvmf_transport *transport,
507 		 spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
508 {
509 	struct spdk_nvmf_tcp_transport	*ttransport;
510 
511 	assert(transport != NULL);
512 	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
513 
514 	pthread_mutex_destroy(&ttransport->lock);
515 	free(ttransport);
516 
517 	if (cb_fn) {
518 		cb_fn(cb_arg);
519 	}
520 	return 0;
521 }
522 
523 static struct spdk_nvmf_transport *
524 nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
525 {
526 	struct spdk_nvmf_tcp_transport *ttransport;
527 	uint32_t sge_count;
528 	uint32_t min_shared_buffers;
529 
530 	ttransport = calloc(1, sizeof(*ttransport));
531 	if (!ttransport) {
532 		return NULL;
533 	}
534 
535 	TAILQ_INIT(&ttransport->ports);
536 
537 	ttransport->transport.ops = &spdk_nvmf_transport_tcp;
538 
539 	ttransport->tcp_opts.c2h_success = SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION;
540 	ttransport->tcp_opts.sock_priority = SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY;
541 	ttransport->tcp_opts.control_msg_num = SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM;
542 	if (opts->transport_specific != NULL &&
543 	    spdk_json_decode_object_relaxed(opts->transport_specific, tcp_transport_opts_decoder,
544 					    SPDK_COUNTOF(tcp_transport_opts_decoder),
545 					    &ttransport->tcp_opts)) {
546 		SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n");
547 		free(ttransport);
548 		return NULL;
549 	}
550 
551 	SPDK_NOTICELOG("*** TCP Transport Init ***\n");
552 
553 	SPDK_INFOLOG(nvmf_tcp, "*** TCP Transport Init ***\n"
554 		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
555 		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
556 		     "  in_capsule_data_size=%d, max_aq_depth=%d\n"
557 		     "  num_shared_buffers=%d, c2h_success=%d,\n"
558 		     "  dif_insert_or_strip=%d, sock_priority=%d\n"
559 		     "  abort_timeout_sec=%d, control_msg_num=%hu\n",
560 		     opts->max_queue_depth,
561 		     opts->max_io_size,
562 		     opts->max_qpairs_per_ctrlr - 1,
563 		     opts->io_unit_size,
564 		     opts->in_capsule_data_size,
565 		     opts->max_aq_depth,
566 		     opts->num_shared_buffers,
567 		     ttransport->tcp_opts.c2h_success,
568 		     opts->dif_insert_or_strip,
569 		     ttransport->tcp_opts.sock_priority,
570 		     opts->abort_timeout_sec,
571 		     ttransport->tcp_opts.control_msg_num);
572 
573 	if (ttransport->tcp_opts.sock_priority > SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY) {
574 		SPDK_ERRLOG("Unsupported socket_priority=%d, the current range is: 0 to %d\n"
575 			    "you can use man 7 socket to view the range of priority under SO_PRIORITY item\n",
576 			    ttransport->tcp_opts.sock_priority, SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY);
577 		free(ttransport);
578 		return NULL;
579 	}
580 
581 	if (ttransport->tcp_opts.control_msg_num == 0 &&
582 	    opts->in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
583 		SPDK_WARNLOG("TCP param control_msg_num can't be 0 if ICD is less than %u bytes. Using default value %u\n",
584 			     SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE, SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM);
585 		ttransport->tcp_opts.control_msg_num = SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM;
586 	}
587 
588 	/* I/O unit size cannot be larger than max I/O size */
589 	if (opts->io_unit_size > opts->max_io_size) {
590 		opts->io_unit_size = opts->max_io_size;
591 	}
592 
593 	sge_count = opts->max_io_size / opts->io_unit_size;
594 	if (sge_count > SPDK_NVMF_MAX_SGL_ENTRIES) {
595 		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
596 		free(ttransport);
597 		return NULL;
598 	}
599 
600 	min_shared_buffers = spdk_env_get_core_count() * opts->buf_cache_size;
601 	if (min_shared_buffers > opts->num_shared_buffers) {
602 		SPDK_ERRLOG("There are not enough buffers to satisfy"
603 			    "per-poll group caches for each thread. (%" PRIu32 ")"
604 			    "supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers);
605 		SPDK_ERRLOG("Please specify a larger number of shared buffers\n");
606 		free(ttransport);
607 		return NULL;
608 	}
609 
610 	pthread_mutex_init(&ttransport->lock, NULL);
611 
612 	return &ttransport->transport;
613 }
614 
615 static int
616 nvmf_tcp_trsvcid_to_int(const char *trsvcid)
617 {
618 	unsigned long long ull;
619 	char *end = NULL;
620 
621 	ull = strtoull(trsvcid, &end, 10);
622 	if (end == NULL || end == trsvcid || *end != '\0') {
623 		return -1;
624 	}
625 
626 	/* Valid TCP/IP port numbers are in [0, 65535] */
627 	if (ull > 65535) {
628 		return -1;
629 	}
630 
631 	return (int)ull;
632 }
633 
634 /**
635  * Canonicalize a listen address trid.
636  */
637 static int
638 nvmf_tcp_canon_listen_trid(struct spdk_nvme_transport_id *canon_trid,
639 			   const struct spdk_nvme_transport_id *trid)
640 {
641 	int trsvcid_int;
642 
643 	trsvcid_int = nvmf_tcp_trsvcid_to_int(trid->trsvcid);
644 	if (trsvcid_int < 0) {
645 		return -EINVAL;
646 	}
647 
648 	memset(canon_trid, 0, sizeof(*canon_trid));
649 	spdk_nvme_trid_populate_transport(canon_trid, SPDK_NVME_TRANSPORT_TCP);
650 	canon_trid->adrfam = trid->adrfam;
651 	snprintf(canon_trid->traddr, sizeof(canon_trid->traddr), "%s", trid->traddr);
652 	snprintf(canon_trid->trsvcid, sizeof(canon_trid->trsvcid), "%d", trsvcid_int);
653 
654 	return 0;
655 }
656 
657 /**
658  * Find an existing listening port.
659  *
660  * Caller must hold ttransport->lock.
661  */
662 static struct spdk_nvmf_tcp_port *
663 nvmf_tcp_find_port(struct spdk_nvmf_tcp_transport *ttransport,
664 		   const struct spdk_nvme_transport_id *trid)
665 {
666 	struct spdk_nvme_transport_id canon_trid;
667 	struct spdk_nvmf_tcp_port *port;
668 
669 	if (nvmf_tcp_canon_listen_trid(&canon_trid, trid) != 0) {
670 		return NULL;
671 	}
672 
673 	TAILQ_FOREACH(port, &ttransport->ports, link) {
674 		if (spdk_nvme_transport_id_compare(&canon_trid, port->trid) == 0) {
675 			return port;
676 		}
677 	}
678 
679 	return NULL;
680 }
681 
682 static int
683 nvmf_tcp_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid,
684 		struct spdk_nvmf_listen_opts *listen_opts)
685 {
686 	struct spdk_nvmf_tcp_transport *ttransport;
687 	struct spdk_nvmf_tcp_port *port;
688 	int trsvcid_int;
689 	uint8_t adrfam;
690 	struct spdk_sock_opts opts;
691 
692 	if (!strlen(trid->trsvcid)) {
693 		SPDK_ERRLOG("Service id is required\n");
694 		return -EINVAL;
695 	}
696 
697 	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
698 
699 	trsvcid_int = nvmf_tcp_trsvcid_to_int(trid->trsvcid);
700 	if (trsvcid_int < 0) {
701 		SPDK_ERRLOG("Invalid trsvcid '%s'\n", trid->trsvcid);
702 		return -EINVAL;
703 	}
704 
705 	pthread_mutex_lock(&ttransport->lock);
706 	port = calloc(1, sizeof(*port));
707 	if (!port) {
708 		SPDK_ERRLOG("Port allocation failed\n");
709 		pthread_mutex_unlock(&ttransport->lock);
710 		return -ENOMEM;
711 	}
712 
713 	port->trid = trid;
714 	opts.opts_size = sizeof(opts);
715 	spdk_sock_get_default_opts(&opts);
716 	opts.priority = ttransport->tcp_opts.sock_priority;
717 	port->listen_sock = spdk_sock_listen_ext(trid->traddr, trsvcid_int,
718 			    NULL, &opts);
719 	if (port->listen_sock == NULL) {
720 		SPDK_ERRLOG("spdk_sock_listen(%s, %d) failed: %s (%d)\n",
721 			    trid->traddr, trsvcid_int,
722 			    spdk_strerror(errno), errno);
723 		free(port);
724 		pthread_mutex_unlock(&ttransport->lock);
725 		return -errno;
726 	}
727 
728 	if (spdk_sock_is_ipv4(port->listen_sock)) {
729 		adrfam = SPDK_NVMF_ADRFAM_IPV4;
730 	} else if (spdk_sock_is_ipv6(port->listen_sock)) {
731 		adrfam = SPDK_NVMF_ADRFAM_IPV6;
732 	} else {
733 		SPDK_ERRLOG("Unhandled socket type\n");
734 		adrfam = 0;
735 	}
736 
737 	if (adrfam != trid->adrfam) {
738 		SPDK_ERRLOG("Socket address family mismatch\n");
739 		spdk_sock_close(&port->listen_sock);
740 		free(port);
741 		pthread_mutex_unlock(&ttransport->lock);
742 		return -EINVAL;
743 	}
744 
745 	SPDK_NOTICELOG("*** NVMe/TCP Target Listening on %s port %s ***\n",
746 		       trid->traddr, trid->trsvcid);
747 
748 	TAILQ_INSERT_TAIL(&ttransport->ports, port, link);
749 	pthread_mutex_unlock(&ttransport->lock);
750 	return 0;
751 }
752 
753 static void
754 nvmf_tcp_stop_listen(struct spdk_nvmf_transport *transport,
755 		     const struct spdk_nvme_transport_id *trid)
756 {
757 	struct spdk_nvmf_tcp_transport *ttransport;
758 	struct spdk_nvmf_tcp_port *port;
759 
760 	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
761 
762 	SPDK_DEBUGLOG(nvmf_tcp, "Removing listen address %s port %s\n",
763 		      trid->traddr, trid->trsvcid);
764 
765 	pthread_mutex_lock(&ttransport->lock);
766 	port = nvmf_tcp_find_port(ttransport, trid);
767 	if (port) {
768 		TAILQ_REMOVE(&ttransport->ports, port, link);
769 		spdk_sock_close(&port->listen_sock);
770 		free(port);
771 	}
772 
773 	pthread_mutex_unlock(&ttransport->lock);
774 }
775 
776 static void nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair,
777 		enum nvme_tcp_pdu_recv_state state);
778 
779 static void
780 nvmf_tcp_qpair_disconnect(struct spdk_nvmf_tcp_qpair *tqpair)
781 {
782 	SPDK_DEBUGLOG(nvmf_tcp, "Disconnecting qpair %p\n", tqpair);
783 
784 	if (tqpair->state <= NVME_TCP_QPAIR_STATE_RUNNING) {
785 		tqpair->state = NVME_TCP_QPAIR_STATE_EXITING;
786 		nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
787 		spdk_poller_unregister(&tqpair->timeout_poller);
788 
789 		/* This will end up calling nvmf_tcp_close_qpair */
790 		spdk_nvmf_qpair_disconnect(&tqpair->qpair, NULL, NULL);
791 	}
792 }
793 
794 static void
795 _pdu_write_done(void *_pdu, int err)
796 {
797 	struct nvme_tcp_pdu			*pdu = _pdu;
798 	struct spdk_nvmf_tcp_qpair		*tqpair = pdu->qpair;
799 
800 	if (err != 0) {
801 		nvmf_tcp_qpair_disconnect(tqpair);
802 		return;
803 	}
804 
805 	assert(pdu->cb_fn != NULL);
806 	pdu->cb_fn(pdu->cb_arg);
807 }
808 
809 static void
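/*
 * Queue a fully built PDU on the socket. IC_RESP and C2H_TERM_REQ are written
 * synchronously, presumably because they may be sent before the qpair has
 * joined a poll group (IC_RESP) or while the connection is being torn down
 * (TERM_REQ), when the asynchronous completion path may never run. All other
 * PDU types use the async writev path.
 */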
810 _tcp_write_pdu(struct nvme_tcp_pdu *pdu)
811 {
812 	uint32_t mapped_length = 0;
813 	ssize_t rc;
814 	struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair;
815 
816 	pdu->sock_req.iovcnt = nvme_tcp_build_iovs(pdu->iov, SPDK_COUNTOF(pdu->iov), pdu,
817 			       tqpair->host_hdgst_enable, tqpair->host_ddgst_enable,
818 			       &mapped_length);
819 	pdu->sock_req.cb_fn = _pdu_write_done;
820 	pdu->sock_req.cb_arg = pdu;
821 	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP ||
822 	    pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ) {
823 		rc = spdk_sock_writev(tqpair->sock, pdu->iov, pdu->sock_req.iovcnt);
824 		if (rc == mapped_length) {
825 			_pdu_write_done(pdu, 0);
826 		} else {
827 			SPDK_ERRLOG("IC_RESP or TERM_REQ could not write to socket.\n");
828 			_pdu_write_done(pdu, -1);
829 		}
830 	} else {
831 		spdk_sock_writev_async(tqpair->sock, &pdu->sock_req);
832 	}
833 }
834 
835 static void
836 data_crc32_accel_done(void *cb_arg, int status)
837 {
838 	struct nvme_tcp_pdu *pdu = cb_arg;
839 
840 	if (spdk_unlikely(status)) {
841 		SPDK_ERRLOG("Failed to compute the data digest for pdu =%p\n", pdu);
842 		_pdu_write_done(pdu, status);
843 		return;
844 	}
845 
846 	pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
847 	MAKE_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);
848 
849 	_tcp_write_pdu(pdu);
850 }
851 
852 static void
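/*
 * Compute the data digest before the PDU is written. Per the NVMe/TCP spec the
 * CRC32C is finalized by XOR-ing with SPDK_CRC32C_XOR. The accel offload below
 * handles only the common case (no DIF context, payload a multiple of
 * SPDK_NVME_TCP_DIGEST_ALIGNMENT, qpair bound to a poll group); anything else
 * falls back to the synchronous CPU calculation.
 */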
853 pdu_data_crc32_compute(struct nvme_tcp_pdu *pdu)
854 {
855 	struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair;
856 	uint32_t crc32c;
857 
858 	/* Data Digest */
859 	if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] && tqpair->host_ddgst_enable) {
860 		/* Only support this limited case for the first step */
861 		if (spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0)
862 				&& tqpair->group)) {
863 			spdk_accel_submit_crc32cv(tqpair->group->accel_channel, &pdu->data_digest_crc32,
864 						  pdu->data_iov, pdu->data_iovcnt, 0, data_crc32_accel_done, pdu);
865 			return;
866 		}
867 
868 		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
869 		MAKE_DIGEST_WORD(pdu->data_digest, crc32c);
870 	}
871 
872 	_tcp_write_pdu(pdu);
873 }
874 
875 static void
876 header_crc32_accel_done(void *cb_arg, int status)
877 {
878 	struct nvme_tcp_pdu *pdu = cb_arg;
879 
880 	pdu->header_digest_crc32 ^= SPDK_CRC32C_XOR;
881 	MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, pdu->header_digest_crc32);
882 	if (spdk_unlikely(status)) {
883 		SPDK_ERRLOG("Failed to compute header digest on pdu=%p\n", pdu);
884 		_pdu_write_done(pdu, status);
885 		return;
886 	}
887 
888 	pdu_data_crc32_compute(pdu);
889 }
890 
891 static void
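/*
 * Entry point for sending a PDU. This starts an asynchronous pipeline: header
 * digest (via accel, if enabled) -> data digest (accel or CPU) -> socket write
 * in _tcp_write_pdu(), each stage chaining to the next from its completion
 * callback.
 */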
892 nvmf_tcp_qpair_write_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
893 			 struct nvme_tcp_pdu *pdu,
894 			 nvme_tcp_qpair_xfer_complete_cb cb_fn,
895 			 void *cb_arg)
896 {
897 	int hlen;
898 
899 	assert(&tqpair->pdu_in_progress != pdu);
900 
901 	hlen = pdu->hdr.common.hlen;
902 	pdu->cb_fn = cb_fn;
903 	pdu->cb_arg = cb_arg;
904 	pdu->qpair = tqpair;
905 
906 	pdu->iov[0].iov_base = &pdu->hdr.raw;
907 	pdu->iov[0].iov_len = hlen;
908 
909 	/* Header Digest */
910 	if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && tqpair->host_hdgst_enable && tqpair->group) {
911 		spdk_accel_submit_crc32cv(tqpair->group->accel_channel, &pdu->header_digest_crc32,
912 					  pdu->iov, 1, 0, header_crc32_accel_done, pdu);
913 		return;
914 	}
915 
916 	pdu_data_crc32_compute(pdu);
917 }
918 
919 static int
920 nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair)
921 {
922 	uint32_t i;
923 	struct spdk_nvmf_transport_opts *opts;
924 	uint32_t in_capsule_data_size;
925 
926 	opts = &tqpair->qpair.transport->opts;
927 
928 	in_capsule_data_size = opts->in_capsule_data_size;
929 	if (opts->dif_insert_or_strip) {
930 		in_capsule_data_size = SPDK_BDEV_BUF_SIZE_WITH_MD(in_capsule_data_size);
931 	}
932 
933 	tqpair->resource_count = opts->max_queue_depth;
934 
935 	tqpair->reqs = calloc(tqpair->resource_count, sizeof(*tqpair->reqs));
936 	if (!tqpair->reqs) {
937 		SPDK_ERRLOG("Unable to allocate reqs on tqpair=%p\n", tqpair);
938 		return -1;
939 	}
940 
941 	if (in_capsule_data_size) {
942 		tqpair->bufs = spdk_zmalloc(tqpair->resource_count * in_capsule_data_size, 0x1000,
943 					    NULL, SPDK_ENV_LCORE_ID_ANY,
944 					    SPDK_MALLOC_DMA);
945 		if (!tqpair->bufs) {
946 			SPDK_ERRLOG("Unable to allocate bufs on tqpair=%p.\n", tqpair);
947 			return -1;
948 		}
949 	}
950 
951 	/* Allocate one additional element, to be used for the mgmt_pdu owned by the tqpair */
952 	tqpair->pdus = spdk_dma_malloc((tqpair->resource_count + 1) * sizeof(*tqpair->pdus), 0x1000, NULL);
953 	if (!tqpair->pdus) {
954 		SPDK_ERRLOG("Unable to allocate pdu pool on tqpair =%p.\n", tqpair);
955 		return -1;
956 	}
957 
958 	for (i = 0; i < tqpair->resource_count; i++) {
959 		struct spdk_nvmf_tcp_req *tcp_req = &tqpair->reqs[i];
960 
961 		tcp_req->ttag = i + 1;
962 		tcp_req->req.qpair = &tqpair->qpair;
963 
964 		tcp_req->pdu = &tqpair->pdus[i];
965 		tcp_req->pdu->qpair = tqpair;
966 
967 		/* Set up memory to receive commands */
968 		if (tqpair->bufs) {
969 			tcp_req->buf = (void *)((uintptr_t)tqpair->bufs + (i * in_capsule_data_size));
970 		}
971 
972 		/* Set the cmd and rsp */
973 		tcp_req->req.rsp = (union nvmf_c2h_msg *)&tcp_req->rsp;
974 		tcp_req->req.cmd = (union nvmf_h2c_msg *)&tcp_req->cmd;
975 
976 		/* Initialize request state to FREE */
977 		tcp_req->state = TCP_REQUEST_STATE_FREE;
978 		TAILQ_INSERT_TAIL(&tqpair->tcp_req_free_queue, tcp_req, state_link);
979 		tqpair->state_cntr[TCP_REQUEST_STATE_FREE]++;
980 	}
981 
982 	tqpair->mgmt_pdu = &tqpair->pdus[i];
983 	tqpair->mgmt_pdu->qpair = tqpair;
984 
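	/* Heuristic: size the socket receive buffer to hold
	 * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR worst-case capsules (command header,
	 * in-capsule data, and both digests). */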
985 	tqpair->recv_buf_size = (in_capsule_data_size + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
986 				 SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR;
987 
988 	return 0;
989 }
990 
991 static int
992 nvmf_tcp_qpair_init(struct spdk_nvmf_qpair *qpair)
993 {
994 	struct spdk_nvmf_tcp_qpair *tqpair;
995 
996 	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
997 
998 	SPDK_DEBUGLOG(nvmf_tcp, "New TCP Connection: %p\n", qpair);
999 
1000 	/* Initialize the request state queues of the qpair */
1001 	TAILQ_INIT(&tqpair->tcp_req_free_queue);
1002 	TAILQ_INIT(&tqpair->tcp_req_working_queue);
1003 
1004 	tqpair->host_hdgst_enable = true;
1005 	tqpair->host_ddgst_enable = true;
1006 
1007 	return 0;
1008 }
1009 
1010 static int
1011 nvmf_tcp_qpair_sock_init(struct spdk_nvmf_tcp_qpair *tqpair)
1012 {
1013 	int rc;
1014 
1015 	/* Set the receive low-water mark to the size of a common PDU header */
1016 	rc = spdk_sock_set_recvlowat(tqpair->sock, sizeof(struct spdk_nvme_tcp_common_pdu_hdr));
1017 	if (rc != 0) {
1018 		SPDK_ERRLOG("spdk_sock_set_recvlowat() failed\n");
1019 		return rc;
1020 	}
1021 
1022 	return 0;
1023 }
1024 
1025 static void
1026 nvmf_tcp_handle_connect(struct spdk_nvmf_transport *transport,
1027 			struct spdk_nvmf_tcp_port *port,
1028 			struct spdk_sock *sock)
1029 {
1030 	struct spdk_nvmf_tcp_qpair *tqpair;
1031 	int rc;
1032 
1033 	SPDK_DEBUGLOG(nvmf_tcp, "New connection accepted on %s port %s\n",
1034 		      port->trid->traddr, port->trid->trsvcid);
1035 
1036 	tqpair = calloc(1, sizeof(struct spdk_nvmf_tcp_qpair));
1037 	if (tqpair == NULL) {
1038 		SPDK_ERRLOG("Could not allocate new connection.\n");
1039 		spdk_sock_close(&sock);
1040 		return;
1041 	}
1042 
1043 	tqpair->sock = sock;
1044 	tqpair->state_cntr[TCP_REQUEST_STATE_FREE] = 0;
1045 	tqpair->port = port;
1046 	tqpair->qpair.transport = transport;
1047 
1048 	rc = spdk_sock_getaddr(tqpair->sock, tqpair->target_addr,
1049 			       sizeof(tqpair->target_addr), &tqpair->target_port,
1050 			       tqpair->initiator_addr, sizeof(tqpair->initiator_addr),
1051 			       &tqpair->initiator_port);
1052 	if (rc < 0) {
1053 		SPDK_ERRLOG("spdk_sock_getaddr() failed of tqpair=%p\n", tqpair);
1054 		nvmf_tcp_qpair_destroy(tqpair);
1055 		return;
1056 	}
1057 
1058 	spdk_nvmf_tgt_new_qpair(transport->tgt, &tqpair->qpair);
1059 }
1060 
1061 static uint32_t
1062 nvmf_tcp_port_accept(struct spdk_nvmf_transport *transport, struct spdk_nvmf_tcp_port *port)
1063 {
1064 	struct spdk_sock *sock;
1065 	uint32_t count = 0;
1066 	int i;
1067 
1068 	for (i = 0; i < NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME; i++) {
1069 		sock = spdk_sock_accept(port->listen_sock);
1070 		if (sock == NULL) {
1071 			break;
1072 		}
1073 		count++;
1074 		nvmf_tcp_handle_connect(transport, port, sock);
1075 	}
1076 
1077 	return count;
1078 }
1079 
1080 static uint32_t
1081 nvmf_tcp_accept(struct spdk_nvmf_transport *transport)
1082 {
1083 	struct spdk_nvmf_tcp_transport *ttransport;
1084 	struct spdk_nvmf_tcp_port *port;
1085 	uint32_t count = 0;
1086 
1087 	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
1088 
1089 	TAILQ_FOREACH(port, &ttransport->ports, link) {
1090 		count += nvmf_tcp_port_accept(transport, port);
1091 	}
1092 
1093 	return count;
1094 }
1095 
1096 static void
1097 nvmf_tcp_discover(struct spdk_nvmf_transport *transport,
1098 		  struct spdk_nvme_transport_id *trid,
1099 		  struct spdk_nvmf_discovery_log_page_entry *entry)
1100 {
1101 	entry->trtype = SPDK_NVMF_TRTYPE_TCP;
1102 	entry->adrfam = trid->adrfam;
1103 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED;
1104 
1105 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
1106 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
1107 
1108 	entry->tsas.tcp.sectype = SPDK_NVME_TCP_SECURITY_NONE;
1109 }
1110 
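/*
 * Control message buffers: if the configured in-capsule data size is smaller
 * than SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE, each poll group keeps a pool of
 * max-size buffers so that admin and fabrics commands, which may carry up to
 * that much in-capsule data, can still be serviced.
 */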
1111 static struct spdk_nvmf_tcp_control_msg_list *
1112 nvmf_tcp_control_msg_list_create(uint16_t num_messages)
1113 {
1114 	struct spdk_nvmf_tcp_control_msg_list *list;
1115 	struct spdk_nvmf_tcp_control_msg *msg;
1116 	uint16_t i;
1117 
1118 	list = calloc(1, sizeof(*list));
1119 	if (!list) {
1120 		SPDK_ERRLOG("Failed to allocate memory for list structure\n");
1121 		return NULL;
1122 	}
1123 
1124 	list->msg_buf = spdk_zmalloc(num_messages * SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE,
1125 				     NVMF_DATA_BUFFER_ALIGNMENT, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
1126 	if (!list->msg_buf) {
1127 		SPDK_ERRLOG("Failed to allocate memory for control message buffers\n");
1128 		free(list);
1129 		return NULL;
1130 	}
1131 
1132 	STAILQ_INIT(&list->free_msgs);
1133 
1134 	for (i = 0; i < num_messages; i++) {
1135 		msg = (struct spdk_nvmf_tcp_control_msg *)((char *)list->msg_buf + i *
1136 				SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE);
1137 		STAILQ_INSERT_TAIL(&list->free_msgs, msg, link);
1138 	}
1139 
1140 	return list;
1141 }
1142 
1143 static void
1144 nvmf_tcp_control_msg_list_free(struct spdk_nvmf_tcp_control_msg_list *list)
1145 {
1146 	if (!list) {
1147 		return;
1148 	}
1149 
1150 	spdk_free(list->msg_buf);
1151 	free(list);
1152 }
1153 
1154 static struct spdk_nvmf_transport_poll_group *
1155 nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
1156 {
1157 	struct spdk_nvmf_tcp_transport	*ttransport;
1158 	struct spdk_nvmf_tcp_poll_group *tgroup;
1159 
1160 	tgroup = calloc(1, sizeof(*tgroup));
1161 	if (!tgroup) {
1162 		return NULL;
1163 	}
1164 
1165 	tgroup->sock_group = spdk_sock_group_create(&tgroup->group);
1166 	if (!tgroup->sock_group) {
1167 		goto cleanup;
1168 	}
1169 
1170 	TAILQ_INIT(&tgroup->qpairs);
1171 	TAILQ_INIT(&tgroup->await_req);
1172 
1173 	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
1174 
1175 	if (transport->opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
1176 		SPDK_DEBUGLOG(nvmf_tcp, "ICD %u is less than min required for admin/fabric commands (%u). "
1177 			      "Creating control messages list\n", transport->opts.in_capsule_data_size,
1178 			      SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE);
1179 		tgroup->control_msg_list = nvmf_tcp_control_msg_list_create(ttransport->tcp_opts.control_msg_num);
1180 		if (!tgroup->control_msg_list) {
1181 			goto cleanup;
1182 		}
1183 	}
1184 
1185 	tgroup->accel_channel = spdk_accel_engine_get_io_channel();
1186 	if (spdk_unlikely(!tgroup->accel_channel)) {
1187 		SPDK_ERRLOG("Cannot create accel_channel for tgroup=%p\n", tgroup);
1188 		goto cleanup;
1189 	}
1190 
1191 	return &tgroup->group;
1192 
1193 cleanup:
1194 	nvmf_tcp_poll_group_destroy(&tgroup->group);
1195 	return NULL;
1196 }
1197 
1198 static struct spdk_nvmf_transport_poll_group *
1199 nvmf_tcp_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
1200 {
1201 	struct spdk_nvmf_tcp_qpair *tqpair;
1202 	struct spdk_sock_group *group = NULL;
1203 	int rc;
1204 
1205 	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
1206 	rc = spdk_sock_get_optimal_sock_group(tqpair->sock, &group);
1207 	if (!rc && group != NULL) {
1208 		return spdk_sock_group_get_ctx(group);
1209 	}
1210 
1211 	return NULL;
1212 }
1213 
1214 static void
1215 nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
1216 {
1217 	struct spdk_nvmf_tcp_poll_group *tgroup;
1218 
1219 	tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
1220 	spdk_sock_group_close(&tgroup->sock_group);
1221 	if (tgroup->control_msg_list) {
1222 		nvmf_tcp_control_msg_list_free(tgroup->control_msg_list);
1223 	}
1224 
1225 	if (tgroup->accel_channel) {
1226 		spdk_put_io_channel(tgroup->accel_channel);
1227 	}
1228 
1229 	free(tgroup);
1230 }
1231 
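/*
 * A qpair lives on one of two poll group lists depending on its receive state.
 * Qpairs in AWAIT_REQ (a command header was parsed but no free tcp_req was
 * available) are kept on the await_req list so the group can retry them as
 * requests complete, rather than waiting for more socket data.
 */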
1232 static void
1233 nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair,
1234 			      enum nvme_tcp_pdu_recv_state state)
1235 {
1236 	if (tqpair->recv_state == state) {
1237 		SPDK_ERRLOG("The recv state of tqpair=%p is same with the state(%d) to be set\n",
1238 			    tqpair, state);
1239 		return;
1240 	}
1241 
1242 	if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) {
1243 		/* When leaving the await req state, move the qpair to the main list */
1244 		TAILQ_REMOVE(&tqpair->group->await_req, tqpair, link);
1245 		TAILQ_INSERT_TAIL(&tqpair->group->qpairs, tqpair, link);
1246 	}
1247 
1248 	SPDK_DEBUGLOG(nvmf_tcp, "tqpair(%p) recv state=%d\n", tqpair, state);
1249 	tqpair->recv_state = state;
1250 
1251 	switch (state) {
1252 	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
1253 	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
1254 	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
1255 		break;
1256 	case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ:
1257 		TAILQ_REMOVE(&tqpair->group->qpairs, tqpair, link);
1258 		TAILQ_INSERT_TAIL(&tqpair->group->await_req, tqpair, link);
1259 		break;
1260 	case NVME_TCP_PDU_RECV_STATE_ERROR:
1261 	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
1262 		memset(&tqpair->pdu_in_progress, 0, sizeof(tqpair->pdu_in_progress));
1263 		break;
1264 	default:
1265 		SPDK_ERRLOG("The state(%d) is invalid\n", state);
1266 		abort();
1267 		break;
1268 	}
1269 }
1270 
1271 static int
1272 nvmf_tcp_qpair_handle_timeout(void *ctx)
1273 {
1274 	struct spdk_nvmf_tcp_qpair *tqpair = ctx;
1275 
1276 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
1277 
1278 	SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair,
1279 		    SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT);
1280 
1281 	nvmf_tcp_qpair_disconnect(tqpair);
1282 	return SPDK_POLLER_BUSY;
1283 }
1284 
1285 static void
1286 nvmf_tcp_send_c2h_term_req_complete(void *cb_arg)
1287 {
1288 	struct spdk_nvmf_tcp_qpair *tqpair = (struct spdk_nvmf_tcp_qpair *)cb_arg;
1289 
1290 	if (!tqpair->timeout_poller) {
1291 		tqpair->timeout_poller = SPDK_POLLER_REGISTER(nvmf_tcp_qpair_handle_timeout, tqpair,
1292 					 SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT * 1000000);
1293 	}
1294 }
1295 
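/*
 * Build and send a C2H Termination Request. Per the NVMe/TCP spec, the PDU
 * carries the header of the offending PDU as error data, and for header-field
 * errors the FEI field holds the byte offset of the bad field. Receive
 * processing stops (recv state ERROR), and the timeout poller registered on
 * send completion tears the qpair down if the host never closes the
 * connection.
 */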
1296 static void
1297 nvmf_tcp_send_c2h_term_req(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
1298 			   enum spdk_nvme_tcp_term_req_fes fes, uint32_t error_offset)
1299 {
1300 	struct nvme_tcp_pdu *rsp_pdu;
1301 	struct spdk_nvme_tcp_term_req_hdr *c2h_term_req;
1302 	uint32_t c2h_term_req_hdr_len = sizeof(*c2h_term_req);
1303 	uint32_t copy_len;
1304 
1305 	rsp_pdu = tqpair->mgmt_pdu;
1306 
1307 	c2h_term_req = &rsp_pdu->hdr.term_req;
1308 	c2h_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
1309 	c2h_term_req->common.hlen = c2h_term_req_hdr_len;
1310 
1311 	if ((fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
1312 	    (fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
1313 		DSET32(&c2h_term_req->fei, error_offset);
1314 	}
1315 
1316 	copy_len = spdk_min(pdu->hdr.common.hlen, SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
1317 
1318 	/* Copy the error info into the buffer */
1319 	memcpy((uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, pdu->hdr.raw, copy_len);
1320 	nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, copy_len);
1321 
1322 	/* Contain the header of the wrong received pdu */
1323 	c2h_term_req->common.plen = c2h_term_req->common.hlen + copy_len;
1324 	nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
1325 	nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvmf_tcp_send_c2h_term_req_complete, tqpair);
1326 }
1327 
1328 static void
1329 nvmf_tcp_capsule_cmd_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
1330 				struct spdk_nvmf_tcp_qpair *tqpair,
1331 				struct nvme_tcp_pdu *pdu)
1332 {
1333 	struct spdk_nvmf_tcp_req *tcp_req;
1334 
1335 	assert(pdu->psh_valid_bytes == pdu->psh_len);
1336 	assert(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
1337 
1338 	tcp_req = nvmf_tcp_req_get(tqpair);
1339 	if (!tcp_req) {
1340 		/* Return immediately and let the allocation be retried later */
1341 		if (tqpair->state_cntr[TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST] > 0) {
1342 			return;
1343 		}
1344 
1345 		/* The host sent more commands than the maximum queue depth. */
1346 		SPDK_ERRLOG("Cannot allocate tcp_req on tqpair=%p\n", tqpair);
1347 		nvmf_tcp_qpair_disconnect(tqpair);
1348 		return;
1349 	}
1350 
1351 	pdu->req = tcp_req;
1352 	assert(tcp_req->state == TCP_REQUEST_STATE_NEW);
1353 	nvmf_tcp_req_process(ttransport, tcp_req);
1354 }
1355 
1356 static void
1357 nvmf_tcp_capsule_cmd_payload_handle(struct spdk_nvmf_tcp_transport *ttransport,
1358 				    struct spdk_nvmf_tcp_qpair *tqpair,
1359 				    struct nvme_tcp_pdu *pdu)
1360 {
1361 	struct spdk_nvmf_tcp_req *tcp_req;
1362 	struct spdk_nvme_tcp_cmd *capsule_cmd;
1363 	uint32_t error_offset = 0;
1364 	enum spdk_nvme_tcp_term_req_fes fes;
1365 
1366 	capsule_cmd = &pdu->hdr.capsule_cmd;
1367 	tcp_req = pdu->req;
1368 	assert(tcp_req != NULL);
1369 	if (capsule_cmd->common.pdo > SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET) {
1370 		SPDK_ERRLOG("Expected ICReq capsule_cmd pdu offset <= %d, got %c\n",
1371 			    SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET, capsule_cmd->common.pdo);
1372 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1373 		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo);
1374 		goto err;
1375 	}
1376 
1377 	nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1378 	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
1379 	nvmf_tcp_req_process(ttransport, tcp_req);
1380 
1381 	return;
1382 err:
1383 	nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
1384 }
1385 
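/*
 * Look up a request by command ID and transfer tag among requests in the given
 * state. Return semantics: 0 with *req set means found; 0 with *req == NULL
 * means no request with that cid is in this state (not an error); -1 means the
 * cid matched but the ttag did not.
 */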
1386 static int
1387 nvmf_tcp_find_req_in_state(struct spdk_nvmf_tcp_qpair *tqpair,
1388 			   enum spdk_nvmf_tcp_req_state state,
1389 			   uint16_t cid, uint16_t tag,
1390 			   struct spdk_nvmf_tcp_req **req)
1391 {
1392 	struct spdk_nvmf_tcp_req *tcp_req = NULL;
1393 
1394 	TAILQ_FOREACH(tcp_req, &tqpair->tcp_req_working_queue, state_link) {
1395 		if (tcp_req->state != state) {
1396 			continue;
1397 		}
1398 
1399 		if (tcp_req->req.cmd->nvme_cmd.cid != cid) {
1400 			continue;
1401 		}
1402 
1403 		if (tcp_req->ttag == tag) {
1404 			*req = tcp_req;
1405 			return 0;
1406 		}
1407 
1408 		*req = NULL;
1409 		return -1;
1410 	}
1411 
1412 	/* Didn't find it, but not an error */
1413 	*req = NULL;
1414 	return 0;
1415 }
1416 
1417 static void
1418 nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
1419 			     struct spdk_nvmf_tcp_qpair *tqpair,
1420 			     struct nvme_tcp_pdu *pdu)
1421 {
1422 	struct spdk_nvmf_tcp_req *tcp_req;
1423 	uint32_t error_offset = 0;
1424 	enum spdk_nvme_tcp_term_req_fes fes = 0;
1425 	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
1426 	int rc;
1427 
1428 	h2c_data = &pdu->hdr.h2c_data;
1429 
1430 	SPDK_DEBUGLOG(nvmf_tcp, "tqpair=%p, r2t_info: datao=%u, datal=%u, cccid=%u, ttag=%u\n",
1431 		      tqpair, h2c_data->datao, h2c_data->datal, h2c_data->cccid, h2c_data->ttag);
1432 
1433 	rc = nvmf_tcp_find_req_in_state(tqpair, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
1434 					h2c_data->cccid, h2c_data->ttag, &tcp_req);
1435 	if (rc == 0 && tcp_req == NULL) {
1436 		rc = nvmf_tcp_find_req_in_state(tqpair, TCP_REQUEST_STATE_AWAITING_R2T_ACK, h2c_data->cccid,
1437 						h2c_data->ttag, &tcp_req);
1438 	}
1439 
1440 	if (!tcp_req) {
1441 		SPDK_DEBUGLOG(nvmf_tcp, "tcp_req is not found for tqpair=%p\n", tqpair);
1442 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER;
1443 		if (rc == 0) {
1444 			error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, cccid);
1445 		} else {
1446 			error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, ttag);
1447 		}
1448 		goto err;
1449 	}
1450 
1451 	if (tcp_req->h2c_offset != h2c_data->datao) {
1452 		SPDK_DEBUGLOG(nvmf_tcp,
1453 			      "tcp_req(%p), tqpair=%p, expected data offset %u, but data offset is %u\n",
1454 			      tcp_req, tqpair, tcp_req->h2c_offset, h2c_data->datao);
1455 		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
1456 		goto err;
1457 	}
1458 
1459 	if ((h2c_data->datao + h2c_data->datal) > tcp_req->req.length) {
1460 		SPDK_DEBUGLOG(nvmf_tcp,
1461 			      "tcp_req(%p), tqpair=%p,  (datao=%u + datal=%u) execeeds requested length=%u\n",
1462 			      tcp_req, tqpair, h2c_data->datao, h2c_data->datal, tcp_req->req.length);
1463 		fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
1464 		goto err;
1465 	}
1466 
1467 	pdu->req = tcp_req;
1468 
1469 	if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
1470 		pdu->dif_ctx = &tcp_req->req.dif.dif_ctx;
1471 	}
1472 
1473 	nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt,
1474 				  h2c_data->datao, h2c_data->datal);
1475 	nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1476 	return;
1477 
1478 err:
1479 	nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
1480 }
1481 
1482 static void
1483 nvmf_tcp_send_capsule_resp_pdu(struct spdk_nvmf_tcp_req *tcp_req,
1484 			       struct spdk_nvmf_tcp_qpair *tqpair)
1485 {
1486 	struct nvme_tcp_pdu *rsp_pdu;
1487 	struct spdk_nvme_tcp_rsp *capsule_resp;
1488 
1489 	SPDK_DEBUGLOG(nvmf_tcp, "enter, tqpair=%p\n", tqpair);
1490 
1491 	rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req);
1492 	assert(rsp_pdu != NULL);
1493 
1494 	capsule_resp = &rsp_pdu->hdr.capsule_resp;
1495 	capsule_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
1496 	capsule_resp->common.plen = capsule_resp->common.hlen = sizeof(*capsule_resp);
1497 	capsule_resp->rccqe = tcp_req->req.rsp->nvme_cpl;
1498 	if (tqpair->host_hdgst_enable) {
1499 		capsule_resp->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1500 		capsule_resp->common.plen += SPDK_NVME_TCP_DIGEST_LEN;
1501 	}
1502 
1503 	nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvmf_tcp_request_free, tcp_req);
1504 }
1505 
1506 static void
1507 nvmf_tcp_pdu_c2h_data_complete(void *cb_arg)
1508 {
1509 	struct spdk_nvmf_tcp_req *tcp_req = cb_arg;
1510 	struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair,
1511 					     struct spdk_nvmf_tcp_qpair, qpair);
1512 	struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(
1513 				tcp_req->req.qpair->transport, struct spdk_nvmf_tcp_transport, transport);
1514 
1515 	assert(tqpair != NULL);
1516 
1517 	if (spdk_unlikely(tcp_req->pdu->rw_offset < tcp_req->req.length)) {
1518 		SPDK_DEBUGLOG(nvmf_tcp, "sending another C2H part, offset %u length %u\n", tcp_req->pdu->rw_offset,
1519 			      tcp_req->req.length);
1520 		_nvmf_tcp_send_c2h_data(tqpair, tcp_req);
1521 		return;
1522 	}
1523 
1524 	if (ttransport->tcp_opts.c2h_success) {
1525 		nvmf_tcp_request_free(tcp_req);
1526 	} else {
1527 		nvmf_tcp_req_pdu_fini(tcp_req);
1528 		nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair);
1529 	}
1530 }
1531 
1532 static void
1533 nvmf_tcp_r2t_complete(void *cb_arg)
1534 {
1535 	struct spdk_nvmf_tcp_req *tcp_req = cb_arg;
1536 	struct spdk_nvmf_tcp_transport *ttransport;
1537 
1538 	nvmf_tcp_req_pdu_fini(tcp_req);
1539 
1540 	ttransport = SPDK_CONTAINEROF(tcp_req->req.qpair->transport,
1541 				      struct spdk_nvmf_tcp_transport, transport);
1542 
1543 	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
1544 
1545 	if (tcp_req->h2c_offset == tcp_req->req.length) {
1546 		nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
1547 		nvmf_tcp_req_process(ttransport, tcp_req);
1548 	}
1549 }
1550 
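/*
 * Send an R2T PDU asking the host to transmit the command's data: r2to is the
 * current h2c_offset (0 for the initial R2T) and r2tl the total expected
 * length. The request parks in AWAITING_R2T_ACK until the R2T write is
 * acknowledged, and H2C data racing ahead of that ack is matched in either
 * state (see nvmf_tcp_h2c_data_hdr_handle()).
 */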
1551 static void
1552 nvmf_tcp_send_r2t_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
1553 		      struct spdk_nvmf_tcp_req *tcp_req)
1554 {
1555 	struct nvme_tcp_pdu *rsp_pdu;
1556 	struct spdk_nvme_tcp_r2t_hdr *r2t;
1557 
1558 	rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req);
1559 	assert(rsp_pdu != NULL);
1560 
1561 	r2t = &rsp_pdu->hdr.r2t;
1562 	r2t->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T;
1563 	r2t->common.plen = r2t->common.hlen = sizeof(*r2t);
1564 
1565 	if (tqpair->host_hdgst_enable) {
1566 		r2t->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1567 		r2t->common.plen += SPDK_NVME_TCP_DIGEST_LEN;
1568 	}
1569 
1570 	r2t->cccid = tcp_req->req.cmd->nvme_cmd.cid;
1571 	r2t->ttag = tcp_req->ttag;
1572 	r2t->r2to = tcp_req->h2c_offset;
1573 	r2t->r2tl = tcp_req->req.length;
1574 
1575 	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_R2T_ACK);
1576 
1577 	SPDK_DEBUGLOG(nvmf_tcp,
1578 		      "tcp_req(%p) on tqpair(%p), r2t_info: cccid=%u, ttag=%u, r2to=%u, r2tl=%u\n",
1579 		      tcp_req, tqpair, r2t->cccid, r2t->ttag, r2t->r2to, r2t->r2tl);
1580 	nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvmf_tcp_r2t_complete, tcp_req);
1581 }
1582 
1583 static void
1584 nvmf_tcp_h2c_data_payload_handle(struct spdk_nvmf_tcp_transport *ttransport,
1585 				 struct spdk_nvmf_tcp_qpair *tqpair,
1586 				 struct nvme_tcp_pdu *pdu)
1587 {
1588 	struct spdk_nvmf_tcp_req *tcp_req;
1589 
1590 	tcp_req = pdu->req;
1591 	assert(tcp_req != NULL);
1592 
1593 	SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
1594 
1595 	tcp_req->h2c_offset += pdu->data_len;
1596 
1597 	nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1598 
1599 	/* Wait for all of the data to arrive AND for the initial R2T PDU send to be
1600 	 * acknowledged before moving on. */
1601 	if (tcp_req->h2c_offset == tcp_req->req.length &&
1602 	    tcp_req->state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER) {
1603 		nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
1604 		nvmf_tcp_req_process(ttransport, tcp_req);
1605 	}
1606 }
1607 
1608 static void
1609 nvmf_tcp_h2c_term_req_dump(struct spdk_nvme_tcp_term_req_hdr *h2c_term_req)
1610 {
1611 	SPDK_ERRLOG("Error info of pdu(%p): %s\n", h2c_term_req,
1612 		    spdk_nvmf_tcp_term_req_fes_str[h2c_term_req->fes]);
1613 	if ((h2c_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
1614 	    (h2c_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
1615 		SPDK_DEBUGLOG(nvmf_tcp, "The offset from the start of the PDU header is %u\n",
1616 			      DGET32(h2c_term_req->fei));
1617 	}
1618 }
1619 
1620 static void
1621 nvmf_tcp_h2c_term_req_hdr_handle(struct spdk_nvmf_tcp_qpair *tqpair,
1622 				 struct nvme_tcp_pdu *pdu)
1623 {
1624 	struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req;
1625 	uint32_t error_offset = 0;
1626 	enum spdk_nvme_tcp_term_req_fes fes;
1627 
1628 	if (h2c_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) {
1629 		SPDK_ERRLOG("Fatal Error Status(FES) is unknown for h2c_term_req pdu=%p\n", pdu);
1630 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1631 		error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes);
1632 		goto end;
1633 	}
1634 
1635 	/* set the data buffer */
1636 	nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + h2c_term_req->common.hlen,
1637 			      h2c_term_req->common.plen - h2c_term_req->common.hlen);
1638 	nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1639 	return;
1640 end:
1641 	nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
1642 }
1643 
1644 static void
1645 nvmf_tcp_h2c_term_req_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair,
1646 				     struct nvme_tcp_pdu *pdu)
1647 {
1648 	struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req;
1649 
1650 	nvmf_tcp_h2c_term_req_dump(h2c_term_req);
1651 	nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
1652 }
1653 
1654 static void
1655 nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair,
1656 			    struct spdk_nvmf_tcp_transport *ttransport)
1657 {
1658 	int rc = 0;
1659 	struct nvme_tcp_pdu *pdu;
1660 	uint32_t crc32c, error_offset = 0;
1661 	enum spdk_nvme_tcp_term_req_fes fes;
1662 
1663 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1664 	pdu = &tqpair->pdu_in_progress;
1665 
1666 	SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
1667 	/* Check the data digest, if enabled */
1668 	if (pdu->ddgst_enable) {
1669 		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
1670 		rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
1671 		if (rc == 0) {
1672 			SPDK_ERRLOG("Data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
1673 			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
1674 			nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
1675 			return;
1676 
1677 		}
1678 	}
1679 
1680 	switch (pdu->hdr.common.pdu_type) {
1681 	case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD:
1682 		nvmf_tcp_capsule_cmd_payload_handle(ttransport, tqpair, pdu);
1683 		break;
1684 	case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA:
1685 		nvmf_tcp_h2c_data_payload_handle(ttransport, tqpair, pdu);
1686 		break;
1687 
1688 	case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ:
1689 		nvmf_tcp_h2c_term_req_payload_handle(tqpair, pdu);
1690 		break;
1691 
1692 	default:
1693 		/* This branch should be unreachable */
1694 		SPDK_ERRLOG("The code should never reach here\n");
1695 		break;
1696 	}
1697 }
1698 
1699 static void
1700 nvmf_tcp_send_icresp_complete(void *cb_arg)
1701 {
1702 	struct spdk_nvmf_tcp_qpair *tqpair = cb_arg;
1703 
1704 	tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
1705 }
1706 
1707 static void
1708 nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport,
1709 		      struct spdk_nvmf_tcp_qpair *tqpair,
1710 		      struct nvme_tcp_pdu *pdu)
1711 {
1712 	struct spdk_nvme_tcp_ic_req *ic_req = &pdu->hdr.ic_req;
1713 	struct nvme_tcp_pdu *rsp_pdu;
1714 	struct spdk_nvme_tcp_ic_resp *ic_resp;
1715 	uint32_t error_offset = 0;
1716 	enum spdk_nvme_tcp_term_req_fes fes;
1717 
1718 	/* Only PFV 0 is defined currently */
1719 	if (ic_req->pfv != 0) {
1720 		SPDK_ERRLOG("Expected ICReq PFV %u, got %u\n", 0u, ic_req->pfv);
1721 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1722 		error_offset = offsetof(struct spdk_nvme_tcp_ic_req, pfv);
1723 		goto end;
1724 	}
1725 
1726 	/* MAXR2T is a 0-based value */
1727 	SPDK_DEBUGLOG(nvmf_tcp, "maxr2t = %u\n", (ic_req->maxr2t + 1u));
1728 
1729 	tqpair->host_hdgst_enable = ic_req->dgst.bits.hdgst_enable ? true : false;
1730 	if (!tqpair->host_hdgst_enable) {
1731 		tqpair->recv_buf_size -= SPDK_NVME_TCP_DIGEST_LEN * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR;
1732 	}
1733 
1734 	tqpair->host_ddgst_enable = ic_req->dgst.bits.ddgst_enable ? true : false;
1735 	if (!tqpair->host_ddgst_enable) {
1736 		tqpair->recv_buf_size -= SPDK_NVME_TCP_DIGEST_LEN * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR;
1737 	}
1738 
1739 	tqpair->recv_buf_size = spdk_max(tqpair->recv_buf_size, MIN_SOCK_PIPE_SIZE);
1740 	/* Now that we know whether digests are enabled, properly size the receive buffer */
1741 	if (spdk_sock_set_recvbuf(tqpair->sock, tqpair->recv_buf_size) < 0) {
1742 		SPDK_WARNLOG("Unable to set the socket receive buffer size to %d for tqpair=%p\n",
1743 			     tqpair->recv_buf_size,
1744 			     tqpair);
1745 		/* Not fatal. */
1746 	}
1747 
1748 	tqpair->cpda = spdk_min(ic_req->hpda, SPDK_NVME_TCP_CPDA_MAX);
1749 	SPDK_DEBUGLOG(nvmf_tcp, "cpda of tqpair=(%p) is : %u\n", tqpair, tqpair->cpda);
1750 
1751 	rsp_pdu = tqpair->mgmt_pdu;
1752 
1753 	ic_resp = &rsp_pdu->hdr.ic_resp;
1754 	ic_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1755 	ic_resp->common.hlen = ic_resp->common.plen =  sizeof(*ic_resp);
1756 	ic_resp->pfv = 0;
1757 	ic_resp->cpda = tqpair->cpda;
1758 	ic_resp->maxh2cdata = ttransport->transport.opts.max_io_size;
1759 	ic_resp->dgst.bits.hdgst_enable = tqpair->host_hdgst_enable ? 1 : 0;
1760 	ic_resp->dgst.bits.ddgst_enable = tqpair->host_ddgst_enable ? 1 : 0;
1761 
1762 	SPDK_DEBUGLOG(nvmf_tcp, "host_hdgst_enable: %u\n", tqpair->host_hdgst_enable);
1763 	SPDK_DEBUGLOG(nvmf_tcp, "host_ddgst_enable: %u\n", tqpair->host_ddgst_enable);
1764 
1765 	tqpair->state = NVME_TCP_QPAIR_STATE_INITIALIZING;
1766 	nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvmf_tcp_send_icresp_complete, tqpair);
1767 	nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1768 	return;
1769 end:
1770 	nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
1771 }
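
/*
 * Note on the ICReq/ICResp negotiation above: the target echoes the digest
 * settings requested by the host, clamps the controller PDU data alignment
 * via cpda = min(hpda, SPDK_NVME_TCP_CPDA_MAX), and advertises
 * maxh2cdata = max_io_size (131072 bytes with the default transport opts
 * defined later in this file). The receive buffer is shrunk when a digest
 * is disabled, since those 4-byte fields will never arrive on the wire.
 */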
1772 
1773 static void
1774 nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair,
1775 			struct spdk_nvmf_tcp_transport *ttransport)
1776 {
1777 	struct nvme_tcp_pdu *pdu;
1778 	int rc;
1779 	uint32_t crc32c, error_offset = 0;
1780 	enum spdk_nvme_tcp_term_req_fes fes;
1781 
1782 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
1783 	pdu = &tqpair->pdu_in_progress;
1784 
1785 	SPDK_DEBUGLOG(nvmf_tcp, "pdu type of tqpair(%p) is %d\n", tqpair,
1786 		      pdu->hdr.common.pdu_type);
1787 	/* check header digest if needed */
1788 	if (pdu->has_hdgst) {
1789 		SPDK_DEBUGLOG(nvmf_tcp, "Verifying the header digest of pdu=%p on tqpair=%p\n", pdu, tqpair);
1790 		crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
1791 		rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
1792 		if (rc == 0) {
1793 			SPDK_ERRLOG("Header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
1794 			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
1795 			nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
1796 			return;
1797 
1798 		}
1799 	}
1800 
1801 	switch (pdu->hdr.common.pdu_type) {
1802 	case SPDK_NVME_TCP_PDU_TYPE_IC_REQ:
1803 		nvmf_tcp_icreq_handle(ttransport, tqpair, pdu);
1804 		break;
1805 	case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD:
1806 		nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_REQ);
1807 		break;
1808 	case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA:
1809 		nvmf_tcp_h2c_data_hdr_handle(ttransport, tqpair, pdu);
1810 		break;
1811 
1812 	case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ:
1813 		nvmf_tcp_h2c_term_req_hdr_handle(tqpair, pdu);
1814 		break;
1815 
1816 	default:
1817 		SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->pdu_in_progress.hdr.common.pdu_type);
1818 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1819 		error_offset = 1;
1820 		nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
1821 		break;
1822 	}
1823 }
1824 
1825 static void
1826 nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair)
1827 {
1828 	struct nvme_tcp_pdu *pdu;
1829 	uint32_t error_offset = 0;
1830 	enum spdk_nvme_tcp_term_req_fes fes;
1831 	uint8_t expected_hlen, pdo;
1832 	bool plen_error = false, pdo_error = false;
1833 
1834 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
1835 	pdu = &tqpair->pdu_in_progress;
1836 
1837 	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ) {
1838 		if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
1839 			SPDK_ERRLOG("Already received an ICReq PDU; rejecting pdu=%p\n", pdu);
1840 			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
1841 			goto err;
1842 		}
1843 		expected_hlen = sizeof(struct spdk_nvme_tcp_ic_req);
1844 		if (pdu->hdr.common.plen != expected_hlen) {
1845 			plen_error = true;
1846 		}
1847 	} else {
1848 		if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
1849 			SPDK_ERRLOG("The NVMe/TCP connection has not been negotiated\n");
1850 			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
1851 			goto err;
1852 		}
1853 
1854 		switch (pdu->hdr.common.pdu_type) {
1855 		case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD:
1856 			expected_hlen = sizeof(struct spdk_nvme_tcp_cmd);
1857 			pdo = pdu->hdr.common.pdo;
1858 			if ((tqpair->cpda != 0) && (pdo % ((tqpair->cpda + 1) << 2) != 0)) {
1859 				pdo_error = true;
1860 				break;
1861 			}
1862 
1863 			if (pdu->hdr.common.plen < expected_hlen) {
1864 				plen_error = true;
1865 			}
1866 			break;
1867 		case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA:
1868 			expected_hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
1869 			pdo = pdu->hdr.common.pdo;
1870 			if ((tqpair->cpda != 0) && (pdo % ((tqpair->cpda + 1) << 2) != 0)) {
1871 				pdo_error = true;
1872 				break;
1873 			}
1874 			if (pdu->hdr.common.plen < expected_hlen) {
1875 				plen_error = true;
1876 			}
1877 			break;
1878 
1879 		case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ:
1880 			expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
1881 			if ((pdu->hdr.common.plen <= expected_hlen) ||
1882 			    (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
1883 				plen_error = true;
1884 			}
1885 			break;
1886 
1887 		default:
1888 			SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", pdu->hdr.common.pdu_type);
1889 			fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1890 			error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type);
1891 			goto err;
1892 		}
1893 	}
1894 
1895 	if (pdu->hdr.common.hlen != expected_hlen) {
1896 		SPDK_ERRLOG("PDU type=0x%02x, expected header length %u, got %u on tqpair=%p\n",
1897 			    pdu->hdr.common.pdu_type,
1898 			    expected_hlen, pdu->hdr.common.hlen, tqpair);
1899 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1900 		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen);
1901 		goto err;
1902 	} else if (pdo_error) {
1903 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1904 		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo);
1905 	} else if (plen_error) {
1906 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1907 		error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen);
1908 		goto err;
1909 	} else {
1910 		nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
1911 		nvme_tcp_pdu_calc_psh_len(&tqpair->pdu_in_progress, tqpair->host_hdgst_enable);
1912 		return;
1913 	}
1914 err:
1915 	nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
1916 }
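
/*
 * Worked example of the PDO alignment check above: CPDA encodes the
 * required data offset alignment in 4-byte units minus one, so the byte
 * alignment is (cpda + 1) << 2. With cpda = 3 the payload must start on a
 * 16-byte boundary: a PDU with pdo = 72 fails (72 % 16 != 0), while
 * pdo = 80 passes.
 */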
1917 
1918 static int
1919 nvmf_tcp_pdu_payload_insert_dif(struct nvme_tcp_pdu *pdu, uint32_t read_offset,
1920 				int read_len)
1921 {
1922 	int rc;
1923 
1924 	rc = spdk_dif_generate_stream(pdu->data_iov, pdu->data_iovcnt,
1925 				      read_offset, read_len, pdu->dif_ctx);
1926 	if (rc != 0) {
1927 		SPDK_ERRLOG("DIF generate failed\n");
1928 	}
1929 
1930 	return rc;
1931 }
1932 
1933 static int
1934 nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
1935 {
1936 	int rc = 0;
1937 	struct nvme_tcp_pdu *pdu;
1938 	enum nvme_tcp_pdu_recv_state prev_state;
1939 	uint32_t data_len;
1940 	struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport,
1941 			struct spdk_nvmf_tcp_transport, transport);
1942 
1943 	/* The loop here is to allow for several back-to-back state changes. */
1944 	do {
1945 		prev_state = tqpair->recv_state;
1946 		SPDK_DEBUGLOG(nvmf_tcp, "tqpair(%p) recv pdu entering state %d\n", tqpair, prev_state);
1947 
1948 		pdu = &tqpair->pdu_in_progress;
1949 		switch (tqpair->recv_state) {
1950 		/* Wait for the common header */
1951 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
1952 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
1953 			if (spdk_unlikely(tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING)) {
1954 				return rc;
1955 			}
1956 
1957 			rc = nvme_tcp_read_data(tqpair->sock,
1958 						sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
1959 						(void *)&pdu->hdr.common + pdu->ch_valid_bytes);
1960 			if (rc < 0) {
1961 				SPDK_DEBUGLOG(nvmf_tcp, "will disconnect tqpair=%p\n", tqpair);
1962 				return NVME_TCP_PDU_FATAL;
1963 			} else if (rc > 0) {
1964 				pdu->ch_valid_bytes += rc;
1965 				spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, 0, rc, 0, 0);
1966 				if (spdk_likely(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY)) {
1967 					nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
1968 				}
1969 			}
1970 
1971 			if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
1972 				return NVME_TCP_PDU_IN_PROGRESS;
1973 			}
1974 
1975 			/* The command header of this PDU has now been read from the socket. */
1976 			nvmf_tcp_pdu_ch_handle(tqpair);
1977 			break;
1978 		/* Wait for the pdu specific header  */
1979 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
1980 			rc = nvme_tcp_read_data(tqpair->sock,
1981 						pdu->psh_len - pdu->psh_valid_bytes,
1982 						(void *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
1983 			if (rc < 0) {
1984 				return NVME_TCP_PDU_FATAL;
1985 			} else if (rc > 0) {
1986 				spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE,
1987 						  0, rc, 0, 0);
1988 				pdu->psh_valid_bytes += rc;
1989 			}
1990 
1991 			if (pdu->psh_valid_bytes < pdu->psh_len) {
1992 				return NVME_TCP_PDU_IN_PROGRESS;
1993 			}
1994 
1995 			/* The full header (CH, PSH, and header digest) of this PDU has now been read from the socket. */
1996 			nvmf_tcp_pdu_psh_handle(tqpair, ttransport);
1997 			break;
1998 		/* Wait for the req slot */
1999 		case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ:
2000 			nvmf_tcp_capsule_cmd_hdr_handle(ttransport, tqpair, pdu);
2001 			break;
2002 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
2003 			/* No payload length has been set yet, so there is nothing to read */
2004 			if (!pdu->data_len) {
2005 				return NVME_TCP_PDU_IN_PROGRESS;
2006 			}
2007 
2008 			data_len = pdu->data_len;
2009 			/* data digest */
2010 			if (spdk_unlikely((pdu->hdr.common.pdu_type != SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) &&
2011 					  tqpair->host_ddgst_enable)) {
2012 				data_len += SPDK_NVME_TCP_DIGEST_LEN;
2013 				pdu->ddgst_enable = true;
2014 			}
2015 
2016 			rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
2017 			if (rc < 0) {
2018 				return NVME_TCP_PDU_FATAL;
2019 			}
2020 			pdu->rw_offset += rc;
2021 
2022 			if (spdk_unlikely(pdu->dif_ctx != NULL)) {
2023 				rc = nvmf_tcp_pdu_payload_insert_dif(pdu, pdu->rw_offset - rc, rc);
2024 				if (rc != 0) {
2025 					return NVME_TCP_PDU_FATAL;
2026 				}
2027 			}
2028 
2029 			if (pdu->rw_offset < data_len) {
2030 				return NVME_TCP_PDU_IN_PROGRESS;
2031 			}
2032 
2033 			/* All of this PDU has now been read from the socket. */
2034 			nvmf_tcp_pdu_payload_handle(tqpair, ttransport);
2035 			break;
2036 		case NVME_TCP_PDU_RECV_STATE_ERROR:
2037 			if (!spdk_sock_is_connected(tqpair->sock)) {
2038 				return NVME_TCP_PDU_FATAL;
2039 			}
2040 			break;
2041 		default:
2042 			assert(0);
2043 			SPDK_ERRLOG("The code should never reach here\n");
2044 			break;
2045 		}
2046 	} while (tqpair->recv_state != prev_state);
2047 
2048 	return rc;
2049 }
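
/*
 * Note on the receive state machine above: a PDU normally flows through
 * AWAIT_PDU_READY -> AWAIT_PDU_CH -> AWAIT_PDU_PSH -> AWAIT_PDU_PAYLOAD and
 * back to AWAIT_PDU_READY, with capsule commands taking a detour through
 * AWAIT_REQ while a free request slot is found. A negative return value
 * (NVME_TCP_PDU_FATAL) tells the caller to disconnect the qpair, while
 * NVME_TCP_PDU_IN_PROGRESS means the socket ran dry mid-PDU and the poller
 * should try again later.
 */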
2050 
2051 static inline void *
2052 nvmf_tcp_control_msg_get(struct spdk_nvmf_tcp_control_msg_list *list)
2053 {
2054 	struct spdk_nvmf_tcp_control_msg *msg;
2055 
2056 	assert(list);
2057 
2058 	msg = STAILQ_FIRST(&list->free_msgs);
2059 	if (!msg) {
2060 		SPDK_DEBUGLOG(nvmf_tcp, "Out of control messages\n");
2061 		return NULL;
2062 	}
2063 	STAILQ_REMOVE_HEAD(&list->free_msgs, link);
2064 	return msg;
2065 }
2066 
2067 static inline void
2068 nvmf_tcp_control_msg_put(struct spdk_nvmf_tcp_control_msg_list *list, void *_msg)
2069 {
2070 	struct spdk_nvmf_tcp_control_msg *msg = _msg;
2071 
2072 	assert(list);
2073 	STAILQ_INSERT_HEAD(&list->free_msgs, msg, link);
2074 }
2075 
2076 static int
2077 nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_req *tcp_req,
2078 		       struct spdk_nvmf_transport *transport,
2079 		       struct spdk_nvmf_transport_poll_group *group)
2080 {
2081 	struct spdk_nvmf_request		*req = &tcp_req->req;
2082 	struct spdk_nvme_cmd			*cmd;
2083 	struct spdk_nvme_cpl			*rsp;
2084 	struct spdk_nvme_sgl_descriptor		*sgl;
2085 	struct spdk_nvmf_tcp_poll_group		*tgroup;
2086 	uint32_t				length;
2087 
2088 	cmd = &req->cmd->nvme_cmd;
2089 	rsp = &req->rsp->nvme_cpl;
2090 	sgl = &cmd->dptr.sgl1;
2091 
2092 	length = sgl->unkeyed.length;
2093 
2094 	if (sgl->generic.type == SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK &&
2095 	    sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_TRANSPORT) {
2096 		if (length > transport->opts.max_io_size) {
2097 			SPDK_ERRLOG("SGL length 0x%x exceeds max io size 0x%x\n",
2098 				    length, transport->opts.max_io_size);
2099 			rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
2100 			return -1;
2101 		}
2102 
2103 		/* fill request length and populate iovs */
2104 		req->length = length;
2105 
2106 		SPDK_DEBUGLOG(nvmf_tcp, "Data requested length= 0x%x\n", length);
2107 
2108 		if (spdk_unlikely(req->dif.dif_insert_or_strip)) {
2109 			req->dif.orig_length = length;
2110 			length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
2111 			req->dif.elba_length = length;
2112 		}
2113 
2114 		if (spdk_nvmf_request_get_buffers(req, group, transport, length)) {
2115 			/* No available buffers. Queue this request up. */
2116 			SPDK_DEBUGLOG(nvmf_tcp, "No available large data buffers. Queueing request %p\n",
2117 				      tcp_req);
2118 			return 0;
2119 		}
2120 
2121 		/* Backward compatibility: req->data points at the first iov */
2122 		req->data = req->iov[0].iov_base;
2123 
2124 		SPDK_DEBUGLOG(nvmf_tcp, "Request %p took %d buffer/s from central pool, and data=%p\n",
2125 			      tcp_req, req->iovcnt, req->data);
2126 
2127 		return 0;
2128 	} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
2129 		   sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
2130 		uint64_t offset = sgl->address;
2131 		uint32_t max_len = transport->opts.in_capsule_data_size;
2132 		assert(tcp_req->has_incapsule_data);
2133 
2134 		SPDK_DEBUGLOG(nvmf_tcp, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
2135 			      offset, length);
2136 
2137 		if (offset > max_len) {
2138 			SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n",
2139 				    offset, max_len);
2140 			rsp->status.sc = SPDK_NVME_SC_INVALID_SGL_OFFSET;
2141 			return -1;
2142 		}
2143 		max_len -= (uint32_t)offset;
2144 
2145 		if (spdk_unlikely(length > max_len)) {
2146 			/* Per the spec, ICD of up to 8192 bytes must be supported for admin and fabrics commands */
2147 			if (length <= SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE &&
2148 			    (cmd->opc == SPDK_NVME_OPC_FABRIC || req->qpair->qid == 0)) {
2149 
2150 				/* Get a buffer from dedicated list */
2151 				SPDK_DEBUGLOG(nvmf_tcp, "Getting a buffer from control msg list\n");
2152 				tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
2153 				assert(tgroup->control_msg_list);
2154 				req->data = nvmf_tcp_control_msg_get(tgroup->control_msg_list);
2155 				if (!req->data) {
2156 					/* No available buffers. Queue this request up. */
2157 					SPDK_DEBUGLOG(nvmf_tcp, "No available ICD buffers. Queueing request %p\n", tcp_req);
2158 					return 0;
2159 				}
2160 			} else {
2161 				SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
2162 					    length, max_len);
2163 				rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
2164 				return -1;
2165 			}
2166 		} else {
2167 			req->data = tcp_req->buf;
2168 		}
2169 
2170 		req->length = length;
2171 		req->data_from_pool = false;
2172 
2173 		if (spdk_unlikely(req->dif.dif_insert_or_strip)) {
2174 			length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
2175 			req->dif.elba_length = length;
2176 		}
2177 
2178 		req->iov[0].iov_base = req->data;
2179 		req->iov[0].iov_len = length;
2180 		req->iovcnt = 1;
2181 
2182 		return 0;
2183 	}
2184 
2185 	SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n",
2186 		    sgl->generic.type, sgl->generic.subtype);
2187 	rsp->status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
2188 	return -1;
2189 }
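
/*
 * Note on the two SGL forms handled above: a transport data block SGL
 * describes data carried in separate H2C/C2H data PDUs and is backed by
 * buffers from the shared transport pool, while a data block SGL with the
 * offset subtype describes in-capsule data. As a worked example of the
 * control message path: with the default in_capsule_data_size of 4096, an
 * admin command carrying 8192 bytes of ICD exceeds the per-request capsule
 * buffer, so a buffer is taken from the poll group's dedicated control
 * message list instead.
 */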
2190 
2191 static inline enum spdk_nvme_media_error_status_code
2192 nvmf_tcp_dif_error_to_compl_status(uint8_t err_type) {
2193 	enum spdk_nvme_media_error_status_code result;
2194 
2195 	switch (err_type)
2196 	{
2197 	case SPDK_DIF_REFTAG_ERROR:
2198 		result = SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR;
2199 		break;
2200 	case SPDK_DIF_APPTAG_ERROR:
2201 		result = SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR;
2202 		break;
2203 	case SPDK_DIF_GUARD_ERROR:
2204 		result = SPDK_NVME_SC_GUARD_CHECK_ERROR;
2205 		break;
2206 	default:
2207 		SPDK_UNREACHABLE();
2208 		break;
2209 	}
2210 
2211 	return result;
2212 }
2213 
2214 static void
2215 _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
2216 			struct spdk_nvmf_tcp_req *tcp_req)
2217 {
2218 	struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(
2219 				tqpair->qpair.transport, struct spdk_nvmf_tcp_transport, transport);
2220 	struct nvme_tcp_pdu *rsp_pdu;
2221 	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
2222 	uint32_t plen, pdo, alignment;
2223 	int rc;
2224 
2225 	SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
2226 
2227 	rsp_pdu = tcp_req->pdu;
2228 	assert(rsp_pdu != NULL);
2229 	assert(tcp_req->pdu_in_use);
2230 
2231 	c2h_data = &rsp_pdu->hdr.c2h_data;
2232 	c2h_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
2233 	plen = c2h_data->common.hlen = sizeof(*c2h_data);
2234 
2235 	if (tqpair->host_hdgst_enable) {
2236 		plen += SPDK_NVME_TCP_DIGEST_LEN;
2237 		c2h_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
2238 	}
2239 
2240 	/* set the psh */
2241 	c2h_data->cccid = tcp_req->req.cmd->nvme_cmd.cid;
2242 	c2h_data->datal = tcp_req->req.length - tcp_req->pdu->rw_offset;
2243 	c2h_data->datao = tcp_req->pdu->rw_offset;
2244 
2245 	/* set the padding */
2246 	rsp_pdu->padding_len = 0;
2247 	pdo = plen;
2248 	if (tqpair->cpda) {
2249 		alignment = (tqpair->cpda + 1) << 2;
2250 		if (plen % alignment != 0) {
2251 			pdo = (plen + alignment) / alignment * alignment;
2252 			rsp_pdu->padding_len = pdo - plen;
2253 			plen = pdo;
2254 		}
2255 	}
2256 
2257 	c2h_data->common.pdo = pdo;
2258 	plen += c2h_data->datal;
2259 	if (tqpair->host_ddgst_enable) {
2260 		c2h_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
2261 		plen += SPDK_NVME_TCP_DIGEST_LEN;
2262 	}
2263 
2264 	c2h_data->common.plen = plen;
2265 
2266 	if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
2267 		rsp_pdu->dif_ctx = &tcp_req->req.dif.dif_ctx;
2268 	}
2269 
2270 	nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt,
2271 				  c2h_data->datao, c2h_data->datal);
2272 
2274 	c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
2275 	if (ttransport->tcp_opts.c2h_success) {
2276 		c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS;
2277 	}
2278 
2279 	if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
2280 		struct spdk_nvme_cpl *rsp = &tcp_req->req.rsp->nvme_cpl;
2281 		struct spdk_dif_error err_blk = {};
2282 		uint32_t mapped_length = 0;
2283 		uint32_t available_iovs = SPDK_COUNTOF(rsp_pdu->iov);
2284 		uint32_t ddgst_len = 0;
2285 
2286 		if (tqpair->host_ddgst_enable) {
2287 			/* Data digest consumes additional iov entry */
2288 			available_iovs--;
2289 			/* plen needs to be updated since nvme_tcp_build_iovs compares expected and actual plen */
2290 			ddgst_len = SPDK_NVME_TCP_DIGEST_LEN;
2291 			c2h_data->common.plen -= ddgst_len;
2292 		}
2293 		/* Temp call to estimate if data can be described by limited number of iovs.
2294 		 * iov vector will be rebuilt in nvmf_tcp_qpair_write_pdu */
2295 		nvme_tcp_build_iovs(rsp_pdu->iov, available_iovs, rsp_pdu, tqpair->host_hdgst_enable,
2296 				    false, &mapped_length);
2297 
2298 		if (mapped_length != c2h_data->common.plen) {
2299 			c2h_data->datal = mapped_length - (c2h_data->common.plen - c2h_data->datal);
2300 			SPDK_DEBUGLOG(nvmf_tcp,
2301 				      "Part C2H, data_len %u (of %u), PDU len %u, updated PDU len %u, offset %u\n",
2302 				      c2h_data->datal, tcp_req->req.length, c2h_data->common.plen, mapped_length, rsp_pdu->rw_offset);
2303 			c2h_data->common.plen = mapped_length;
2304 
2305 			/* Rebuild pdu->data_iov since data length is changed */
2306 			nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt, c2h_data->datao,
2307 						  c2h_data->datal);
2308 
2309 			c2h_data->common.flags &= ~(SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU |
2310 						    SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);
2311 		}
2312 
2313 		c2h_data->common.plen += ddgst_len;
2314 
2315 		assert(rsp_pdu->rw_offset <= tcp_req->req.length);
2316 
2317 		rc = spdk_dif_verify_stream(rsp_pdu->data_iov, rsp_pdu->data_iovcnt,
2318 					    0, rsp_pdu->data_len, rsp_pdu->dif_ctx, &err_blk);
2319 		if (rc != 0) {
2320 			SPDK_ERRLOG("DIF error detected. type=%d, offset=%" PRIu32 "\n",
2321 				    err_blk.err_type, err_blk.err_offset);
2322 			rsp->status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
2323 			rsp->status.sc = nvmf_tcp_dif_error_to_compl_status(err_blk.err_type);
2324 			nvmf_tcp_req_pdu_fini(tcp_req);
2325 			nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair);
2326 			return;
2327 		}
2328 	}
2329 
2330 	rsp_pdu->rw_offset += c2h_data->datal;
2331 	nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvmf_tcp_pdu_c2h_data_complete, tcp_req);
2332 }
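
/*
 * Worked example of the C2H padding math above: with cpda = 1 the required
 * alignment is (1 + 1) << 2 = 8 bytes. A 24-byte header plus a 4-byte
 * header digest gives plen = 28, so pdo is rounded up to 32 and 4 bytes of
 * padding are inserted before the payload.
 */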
2333 
2334 static void
2335 nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
2336 		       struct spdk_nvmf_tcp_req *tcp_req)
2337 {
2338 	nvmf_tcp_req_pdu_init(tcp_req);
2339 	_nvmf_tcp_send_c2h_data(tqpair, tcp_req);
2340 }
2341 
2342 static int
2343 request_transfer_out(struct spdk_nvmf_request *req)
2344 {
2345 	struct spdk_nvmf_tcp_req	*tcp_req;
2346 	struct spdk_nvmf_qpair		*qpair;
2347 	struct spdk_nvmf_tcp_qpair	*tqpair;
2348 	struct spdk_nvme_cpl		*rsp;
2349 
2350 	SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
2351 
2352 	qpair = req->qpair;
2353 	rsp = &req->rsp->nvme_cpl;
2354 	tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req);
2355 
2356 	/* Advance our sq_head pointer */
2357 	if (qpair->sq_head == qpair->sq_head_max) {
2358 		qpair->sq_head = 0;
2359 	} else {
2360 		qpair->sq_head++;
2361 	}
2362 	rsp->sqhd = qpair->sq_head;
2363 
2364 	tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
2365 	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
2366 	if (rsp->status.sc == SPDK_NVME_SC_SUCCESS && req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
2367 		nvmf_tcp_send_c2h_data(tqpair, tcp_req);
2368 	} else {
2369 		nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair);
2370 	}
2371 
2372 	return 0;
2373 }
2374 
2375 static void
2376 nvmf_tcp_set_incapsule_data(struct spdk_nvmf_tcp_qpair *tqpair,
2377 			    struct spdk_nvmf_tcp_req *tcp_req)
2378 {
2379 	struct nvme_tcp_pdu *pdu;
2380 	uint32_t plen = 0;
2381 
2382 	pdu = &tqpair->pdu_in_progress;
2383 	plen = pdu->hdr.common.hlen;
2384 
2385 	if (tqpair->host_hdgst_enable) {
2386 		plen += SPDK_NVME_TCP_DIGEST_LEN;
2387 	}
2388 
2389 	if (pdu->hdr.common.plen != plen) {
2390 		tcp_req->has_incapsule_data = true;
2391 	}
2392 }
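
/*
 * Note on the in-capsule detection above: a command capsule with no data
 * has plen equal to its header length (plus 4 bytes when the header digest
 * is enabled), so any larger plen implies in-capsule data. For example, a
 * capsule command PDU has hlen 72 (8-byte common header + 64-byte SQE);
 * with the header digest enabled and no data digest, plen = 4172 implies
 * 4096 bytes of ICD.
 */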
2393 
2394 static bool
2395 nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
2396 		     struct spdk_nvmf_tcp_req *tcp_req)
2397 {
2398 	struct spdk_nvmf_tcp_qpair		*tqpair;
2399 	int					rc;
2400 	enum spdk_nvmf_tcp_req_state		prev_state;
2401 	bool					progress = false;
2402 	struct spdk_nvmf_transport		*transport = &ttransport->transport;
2403 	struct spdk_nvmf_transport_poll_group	*group;
2404 	struct spdk_nvmf_tcp_poll_group		*tgroup;
2405 
2406 	tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
2407 	group = &tqpair->group->group;
2408 	assert(tcp_req->state != TCP_REQUEST_STATE_FREE);
2409 
2410 	/* If the qpair is not active, we need to abort the outstanding requests. */
2411 	if (tqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
2412 		if (tcp_req->state == TCP_REQUEST_STATE_NEED_BUFFER) {
2413 			STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link);
2414 		}
2415 		nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED);
2416 	}
2417 
2418 	/* The loop here is to allow for several back-to-back state changes. */
2419 	do {
2420 		prev_state = tcp_req->state;
2421 
2422 		SPDK_DEBUGLOG(nvmf_tcp, "Request %p entering state %d on tqpair=%p\n", tcp_req, prev_state,
2423 			      tqpair);
2424 
2425 		switch (tcp_req->state) {
2426 		case TCP_REQUEST_STATE_FREE:
2427 			/* Some external code must kick a request into TCP_REQUEST_STATE_NEW
2428 			 * to escape this state. */
2429 			break;
2430 		case TCP_REQUEST_STATE_NEW:
2431 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEW, 0, 0, (uintptr_t)tcp_req, 0);
2432 
2433 			/* Copy the command from the received PDU */
2434 			tcp_req->cmd = tqpair->pdu_in_progress.hdr.capsule_cmd.ccsqe;
2435 
2436 			if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&tcp_req->req, &tcp_req->req.dif.dif_ctx))) {
2437 				tcp_req->req.dif.dif_insert_or_strip = true;
2438 				tqpair->pdu_in_progress.dif_ctx = &tcp_req->req.dif.dif_ctx;
2439 			}
2440 
2441 			/* The next state transition depends on the data transfer needs of this request. */
2442 			tcp_req->req.xfer = spdk_nvmf_req_get_xfer(&tcp_req->req);
2443 
2444 			if (spdk_unlikely(tcp_req->req.xfer == SPDK_NVME_DATA_BIDIRECTIONAL)) {
2445 				tcp_req->req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2446 				tcp_req->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
2447 				nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
2448 				SPDK_DEBUGLOG(nvmf_tcp, "Request %p: invalid xfer type (BIDIRECTIONAL)\n", tcp_req);
2449 				break;
2450 			}
2451 
2452 			/* If no data to transfer, ready to execute. */
2453 			if (tcp_req->req.xfer == SPDK_NVME_DATA_NONE) {
2454 				/* Reset the tqpair receiving PDU state */
2455 				nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
2456 				nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
2457 				break;
2458 			}
2459 
2460 			nvmf_tcp_set_incapsule_data(tqpair, tcp_req);
2461 
2462 			if (!tcp_req->has_incapsule_data) {
2463 				nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
2464 			}
2465 
2466 			nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEED_BUFFER);
2467 			STAILQ_INSERT_TAIL(&group->pending_buf_queue, &tcp_req->req, buf_link);
2468 			break;
2469 		case TCP_REQUEST_STATE_NEED_BUFFER:
2470 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 0, 0, (uintptr_t)tcp_req, 0);
2471 
2472 			assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE);
2473 
2474 			if (!tcp_req->has_incapsule_data && (&tcp_req->req != STAILQ_FIRST(&group->pending_buf_queue))) {
2475 				SPDK_DEBUGLOG(nvmf_tcp,
2476 					      "tcp_req(%p) is not at the head of the pending buffer queue on tqpair=%p\n",
2477 					      tcp_req, tqpair);
2478 				/* This request needs to wait in line to obtain a buffer */
2479 				break;
2480 			}
2481 
2482 			/* Try to get a data buffer */
2483 			rc = nvmf_tcp_req_parse_sgl(tcp_req, transport, group);
2484 			if (rc < 0) {
2485 				STAILQ_REMOVE_HEAD(&group->pending_buf_queue, buf_link);
2486 				/* Reset the tqpair receiving PDU state */
2487 				nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
2488 				nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
2489 				break;
2490 			}
2491 
2492 			if (!tcp_req->req.data) {
2493 				SPDK_DEBUGLOG(nvmf_tcp, "No buffer allocated for tcp_req(%p) on tqpair(%p)\n",
2494 					      tcp_req, tqpair);
2495 				/* No buffers available. */
2496 				break;
2497 			}
2498 
2499 			STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link);
2500 
2501 			/* If data is transferring from host to controller, we need to do a transfer from the host. */
2502 			if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
2503 				if (tcp_req->req.data_from_pool) {
2504 					SPDK_DEBUGLOG(nvmf_tcp, "Sending R2T for tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair);
2505 					nvmf_tcp_send_r2t_pdu(tqpair, tcp_req);
2506 				} else {
2507 					struct nvme_tcp_pdu *pdu;
2508 
2509 					nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
2510 
2511 					pdu = &tqpair->pdu_in_progress;
2512 					SPDK_DEBUGLOG(nvmf_tcp, "No need to send R2T for tcp_req(%p) on tqpair=%p\n", tcp_req,
2513 						      tqpair);
2514 					/* No R2T is needed; the data arrived within the command capsule */
2515 					nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt,
2516 								  0, tcp_req->req.length);
2517 					nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
2518 				}
2519 				break;
2520 			}
2521 
2522 			nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
2523 			break;
2524 		case TCP_REQUEST_STATE_AWAITING_R2T_ACK:
2525 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, 0, 0, (uintptr_t)tcp_req, 0);
2526 			/* The R2T completion or the h2c data incoming will kick it out of this state. */
2527 			break;
2528 		case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
2529 
2530 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 0, 0,
2531 					  (uintptr_t)tcp_req, 0);
2532 			/* Some external code must kick a request into TCP_REQUEST_STATE_READY_TO_EXECUTE
2533 			 * to escape this state. */
2534 			break;
2535 		case TCP_REQUEST_STATE_READY_TO_EXECUTE:
2536 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 0, 0, (uintptr_t)tcp_req, 0);
2537 
2538 			if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
2539 				assert(tcp_req->req.dif.elba_length >= tcp_req->req.length);
2540 				tcp_req->req.length = tcp_req->req.dif.elba_length;
2541 			}
2542 
2543 			nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTING);
2544 			spdk_nvmf_request_exec(&tcp_req->req);
2545 			break;
2546 		case TCP_REQUEST_STATE_EXECUTING:
2547 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTING, 0, 0, (uintptr_t)tcp_req, 0);
2548 			/* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED
2549 			 * to escape this state. */
2550 			break;
2551 		case TCP_REQUEST_STATE_EXECUTED:
2552 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, 0, 0, (uintptr_t)tcp_req, 0);
2553 
2554 			if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
2555 				tcp_req->req.length = tcp_req->req.dif.orig_length;
2556 			}
2557 
2558 			nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
2559 			break;
2560 		case TCP_REQUEST_STATE_READY_TO_COMPLETE:
2561 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, 0, 0, (uintptr_t)tcp_req, 0);
2562 			rc = request_transfer_out(&tcp_req->req);
2563 			assert(rc == 0); /* No good way to handle this currently */
2564 			break;
2565 		case TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST:
2566 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 0, 0,
2567 					  (uintptr_t)tcp_req,
2568 					  0);
2569 			/* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED
2570 			 * to escape this state. */
2571 			break;
2572 		case TCP_REQUEST_STATE_COMPLETED:
2573 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req, 0);
2574 			if (tcp_req->req.data_from_pool) {
2575 				spdk_nvmf_request_free_buffers(&tcp_req->req, group, transport);
2576 			} else if (spdk_unlikely(tcp_req->has_incapsule_data && (tcp_req->cmd.opc == SPDK_NVME_OPC_FABRIC ||
2577 						 tqpair->qpair.qid == 0) && tcp_req->req.length > transport->opts.in_capsule_data_size)) {
2578 				tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
2579 				assert(tgroup->control_msg_list);
2580 				SPDK_DEBUGLOG(nvmf_tcp, "Put buf to control msg list\n");
2581 				nvmf_tcp_control_msg_put(tgroup->control_msg_list, tcp_req->req.data);
2582 			}
2583 			tcp_req->req.length = 0;
2584 			tcp_req->req.iovcnt = 0;
2585 			tcp_req->req.data = NULL;
2586 
2587 			nvmf_tcp_req_pdu_fini(tcp_req);
2588 
2589 			nvmf_tcp_req_put(tqpair, tcp_req);
2590 			break;
2591 		case TCP_REQUEST_NUM_STATES:
2592 		default:
2593 			assert(0);
2594 			break;
2595 		}
2596 
2597 		if (tcp_req->state != prev_state) {
2598 			progress = true;
2599 		}
2600 	} while (tcp_req->state != prev_state);
2601 
2602 	return progress;
2603 }
2604 
2605 static void
2606 nvmf_tcp_sock_cb(void *arg, struct spdk_sock_group *group, struct spdk_sock *sock)
2607 {
2608 	struct spdk_nvmf_tcp_qpair *tqpair = arg;
2609 	int rc;
2610 
2611 	assert(tqpair != NULL);
2612 	rc = nvmf_tcp_sock_process(tqpair);
2613 
2614 	/* If there was a new socket error, disconnect */
2615 	if (rc < 0) {
2616 		nvmf_tcp_qpair_disconnect(tqpair);
2617 	}
2618 }
2619 
2620 static int
2621 nvmf_tcp_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2622 			struct spdk_nvmf_qpair *qpair)
2623 {
2624 	struct spdk_nvmf_tcp_poll_group	*tgroup;
2625 	struct spdk_nvmf_tcp_qpair	*tqpair;
2626 	int				rc;
2627 
2628 	tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
2629 	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
2630 
2631 	rc = spdk_sock_group_add_sock(tgroup->sock_group, tqpair->sock,
2632 				      nvmf_tcp_sock_cb, tqpair);
2633 	if (rc != 0) {
2634 		SPDK_ERRLOG("Could not add sock to sock_group: %s (%d)\n",
2635 			    spdk_strerror(errno), errno);
2636 		return -1;
2637 	}
2638 
2639 	rc = nvmf_tcp_qpair_sock_init(tqpair);
2640 	if (rc != 0) {
2641 		SPDK_ERRLOG("Cannot set sock opt for tqpair=%p\n", tqpair);
2642 		return -1;
2643 	}
2644 
2645 	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
2646 	if (rc < 0) {
2647 		SPDK_ERRLOG("Cannot init tqpair=%p\n", tqpair);
2648 		return -1;
2649 	}
2650 
2651 	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
2652 	if (rc < 0) {
2653 		SPDK_ERRLOG("Cannot init memory resource info for tqpair=%p\n", tqpair);
2654 		return -1;
2655 	}
2656 
2657 	tqpair->group = tgroup;
2658 	tqpair->state = NVME_TCP_QPAIR_STATE_INVALID;
2659 	TAILQ_INSERT_TAIL(&tgroup->qpairs, tqpair, link);
2660 
2661 	return 0;
2662 }
2663 
2664 static int
2665 nvmf_tcp_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
2666 			   struct spdk_nvmf_qpair *qpair)
2667 {
2668 	struct spdk_nvmf_tcp_poll_group	*tgroup;
2669 	struct spdk_nvmf_tcp_qpair		*tqpair;
2670 	int				rc;
2671 
2672 	tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
2673 	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
2674 
2675 	assert(tqpair->group == tgroup);
2676 
2677 	SPDK_DEBUGLOG(nvmf_tcp, "remove tqpair=%p from the tgroup=%p\n", tqpair, tgroup);
2678 	if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) {
2679 		TAILQ_REMOVE(&tgroup->await_req, tqpair, link);
2680 	} else {
2681 		TAILQ_REMOVE(&tgroup->qpairs, tqpair, link);
2682 	}
2683 
2684 	rc = spdk_sock_group_remove_sock(tgroup->sock_group, tqpair->sock);
2685 	if (rc != 0) {
2686 		SPDK_ERRLOG("Could not remove sock from sock_group: %s (%d)\n",
2687 			    spdk_strerror(errno), errno);
2688 	}
2689 
2690 	return rc;
2691 }
2692 
2693 static int
2694 nvmf_tcp_req_complete(struct spdk_nvmf_request *req)
2695 {
2696 	struct spdk_nvmf_tcp_transport *ttransport;
2697 	struct spdk_nvmf_tcp_req *tcp_req;
2698 
2699 	ttransport = SPDK_CONTAINEROF(req->qpair->transport, struct spdk_nvmf_tcp_transport, transport);
2700 	tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req);
2701 
2702 	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTED);
2703 	nvmf_tcp_req_process(ttransport, tcp_req);
2704 
2705 	return 0;
2706 }
2707 
2708 static void
2709 nvmf_tcp_close_qpair(struct spdk_nvmf_qpair *qpair,
2710 		     spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
2711 {
2712 	struct spdk_nvmf_tcp_qpair *tqpair;
2713 
2714 	SPDK_DEBUGLOG(nvmf_tcp, "Qpair: %p\n", qpair);
2715 
2716 	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
2717 	tqpair->state = NVME_TCP_QPAIR_STATE_EXITED;
2718 	nvmf_tcp_qpair_destroy(tqpair);
2719 
2720 	if (cb_fn) {
2721 		cb_fn(cb_arg);
2722 	}
2723 }
2724 
2725 static int
2726 nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2727 {
2728 	struct spdk_nvmf_tcp_poll_group *tgroup;
2729 	int rc;
2730 	struct spdk_nvmf_request *req, *req_tmp;
2731 	struct spdk_nvmf_tcp_req *tcp_req;
2732 	struct spdk_nvmf_tcp_qpair *tqpair, *tqpair_tmp;
2733 	struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(group->transport,
2734 			struct spdk_nvmf_tcp_transport, transport);
2735 
2736 	tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
2737 
2738 	if (spdk_unlikely(TAILQ_EMPTY(&tgroup->qpairs) && TAILQ_EMPTY(&tgroup->await_req))) {
2739 		return 0;
2740 	}
2741 
2742 	STAILQ_FOREACH_SAFE(req, &group->pending_buf_queue, buf_link, req_tmp) {
2743 		tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req);
2744 		if (nvmf_tcp_req_process(ttransport, tcp_req) == false) {
2745 			break;
2746 		}
2747 	}
2748 
2749 	rc = spdk_sock_group_poll(tgroup->sock_group);
2750 	if (rc < 0) {
2751 		SPDK_ERRLOG("Failed to poll sock_group=%p\n", tgroup->sock_group);
2752 	}
2753 
2754 	TAILQ_FOREACH_SAFE(tqpair, &tgroup->await_req, link, tqpair_tmp) {
2755 		nvmf_tcp_sock_process(tqpair);
2756 	}
2757 
2758 	return rc;
2759 }
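
/*
 * Note on the polling order above: requests parked on pending_buf_queue are
 * retried first so that freed buffers are handed out before new socket data
 * queues more work, then the socket group is polled for I/O, and finally
 * qpairs in AWAIT_REQ are re-driven, since their progress depends on
 * request slots freed by the earlier steps rather than on new socket
 * events.
 */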
2760 
2761 static int
2762 nvmf_tcp_qpair_get_trid(struct spdk_nvmf_qpair *qpair,
2763 			struct spdk_nvme_transport_id *trid, bool peer)
2764 {
2765 	struct spdk_nvmf_tcp_qpair     *tqpair;
2766 	uint16_t			port;
2767 
2768 	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
2769 	spdk_nvme_trid_populate_transport(trid, SPDK_NVME_TRANSPORT_TCP);
2770 
2771 	if (peer) {
2772 		snprintf(trid->traddr, sizeof(trid->traddr), "%s", tqpair->initiator_addr);
2773 		port = tqpair->initiator_port;
2774 	} else {
2775 		snprintf(trid->traddr, sizeof(trid->traddr), "%s", tqpair->target_addr);
2776 		port = tqpair->target_port;
2777 	}
2778 
2779 	if (spdk_sock_is_ipv4(tqpair->sock)) {
2780 		trid->adrfam = SPDK_NVMF_ADRFAM_IPV4;
2781 	} else if (spdk_sock_is_ipv6(tqpair->sock)) {
2782 		trid->adrfam = SPDK_NVMF_ADRFAM_IPV6;
2783 	} else {
2784 		return -1;
2785 	}
2786 
2787 	snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%d", port);
2788 	return 0;
2789 }
2790 
2791 static int
2792 nvmf_tcp_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2793 			      struct spdk_nvme_transport_id *trid)
2794 {
2795 	return nvmf_tcp_qpair_get_trid(qpair, trid, 0);
2796 }
2797 
2798 static int
2799 nvmf_tcp_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2800 			     struct spdk_nvme_transport_id *trid)
2801 {
2802 	return nvmf_tcp_qpair_get_trid(qpair, trid, 1);
2803 }
2804 
2805 static int
2806 nvmf_tcp_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2807 			       struct spdk_nvme_transport_id *trid)
2808 {
2809 	return nvmf_tcp_qpair_get_trid(qpair, trid, 0);
2810 }
2811 
2812 static void
2813 nvmf_tcp_req_set_abort_status(struct spdk_nvmf_request *req,
2814 			      struct spdk_nvmf_tcp_req *tcp_req_to_abort)
2815 {
2816 	tcp_req_to_abort->req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2817 	tcp_req_to_abort->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
2818 
2819 	nvmf_tcp_req_set_state(tcp_req_to_abort, TCP_REQUEST_STATE_READY_TO_COMPLETE);
2820 
2821 	req->rsp->nvme_cpl.cdw0 &= ~1U; /* Command was successfully aborted. */
2822 }
2823 
2824 static int
2825 _nvmf_tcp_qpair_abort_request(void *ctx)
2826 {
2827 	struct spdk_nvmf_request *req = ctx;
2828 	struct spdk_nvmf_tcp_req *tcp_req_to_abort = SPDK_CONTAINEROF(req->req_to_abort,
2829 			struct spdk_nvmf_tcp_req, req);
2830 	struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(req->req_to_abort->qpair,
2831 					     struct spdk_nvmf_tcp_qpair, qpair);
2832 	int rc;
2833 
2834 	spdk_poller_unregister(&req->poller);
2835 
2836 	switch (tcp_req_to_abort->state) {
2837 	case TCP_REQUEST_STATE_EXECUTING:
2838 		rc = nvmf_ctrlr_abort_request(req);
2839 		if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS) {
2840 			return SPDK_POLLER_BUSY;
2841 		}
2842 		break;
2843 
2844 	case TCP_REQUEST_STATE_NEED_BUFFER:
2845 		STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue,
2846 			      &tcp_req_to_abort->req, spdk_nvmf_request, buf_link);
2847 
2848 		nvmf_tcp_req_set_abort_status(req, tcp_req_to_abort);
2849 		break;
2850 
2851 	case TCP_REQUEST_STATE_AWAITING_R2T_ACK:
2852 		nvmf_tcp_req_set_abort_status(req, tcp_req_to_abort);
2853 		break;
2854 
2855 	case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
2856 		if (spdk_get_ticks() < req->timeout_tsc) {
2857 			req->poller = SPDK_POLLER_REGISTER(_nvmf_tcp_qpair_abort_request, req, 0);
2858 			return SPDK_POLLER_BUSY;
2859 		}
2860 		break;
2861 
2862 	default:
2863 		break;
2864 	}
2865 
2866 	spdk_nvmf_request_complete(req);
2867 	return SPDK_POLLER_BUSY;
2868 }
2869 
2870 static void
2871 nvmf_tcp_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
2872 			     struct spdk_nvmf_request *req)
2873 {
2874 	struct spdk_nvmf_tcp_qpair *tqpair;
2875 	struct spdk_nvmf_tcp_transport *ttransport;
2876 	struct spdk_nvmf_transport *transport;
2877 	uint16_t cid;
2878 	uint32_t i;
2879 	struct spdk_nvmf_tcp_req *tcp_req_to_abort = NULL;
2880 
2881 	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
2882 	ttransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_tcp_transport, transport);
2883 	transport = &ttransport->transport;
2884 
2885 	cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid;
2886 
2887 	for (i = 0; i < tqpair->resource_count; i++) {
2888 		if (tqpair->reqs[i].state != TCP_REQUEST_STATE_FREE &&
2889 		    tqpair->reqs[i].req.cmd->nvme_cmd.cid == cid) {
2890 			tcp_req_to_abort = &tqpair->reqs[i];
2891 			break;
2892 		}
2893 	}
2894 
2895 	if (tcp_req_to_abort == NULL) {
2896 		spdk_nvmf_request_complete(req);
2897 		return;
2898 	}
2899 
2900 	req->req_to_abort = &tcp_req_to_abort->req;
2901 	req->timeout_tsc = spdk_get_ticks() +
2902 			   transport->opts.abort_timeout_sec * spdk_get_ticks_hz();
2903 	req->poller = NULL;
2904 
2905 	_nvmf_tcp_qpair_abort_request(req);
2906 }
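
/*
 * Note on the abort deadline above: timeout_tsc is measured in CPU ticks.
 * With the default abort_timeout_sec of 1 and, say, a 2.4 GHz tick rate,
 * the H2C-transfer case in _nvmf_tcp_qpair_abort_request() keeps re-arming
 * its poller until roughly 2.4 billion ticks elapse, after which the abort
 * request completes without waiting for the data transfer to finish.
 */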
2907 
2908 #define SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH 128
2909 #define SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH 128
2910 #define SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR 128
2911 #define SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
2912 #define SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE 131072
2913 #define SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE 131072
2914 #define SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS 511
2915 #define SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE 32
2916 #define SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP false
2917 #define SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC 1
2918 
2919 static void
2920 nvmf_tcp_opts_init(struct spdk_nvmf_transport_opts *opts)
2921 {
2922 	opts->max_queue_depth =		SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH;
2923 	opts->max_qpairs_per_ctrlr =	SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR;
2924 	opts->in_capsule_data_size =	SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE;
2925 	opts->max_io_size =		SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE;
2926 	opts->io_unit_size =		SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE;
2927 	opts->max_aq_depth =		SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH;
2928 	opts->num_shared_buffers =	SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS;
2929 	opts->buf_cache_size =		SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE;
2930 	opts->dif_insert_or_strip =	SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP;
2931 	opts->abort_timeout_sec =	SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC;
2932 	opts->transport_specific =      NULL;
2933 }
2934 
2935 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
2936 	.name = "TCP",
2937 	.type = SPDK_NVME_TRANSPORT_TCP,
2938 	.opts_init = nvmf_tcp_opts_init,
2939 	.create = nvmf_tcp_create,
2940 	.dump_opts = nvmf_tcp_dump_opts,
2941 	.destroy = nvmf_tcp_destroy,
2942 
2943 	.listen = nvmf_tcp_listen,
2944 	.stop_listen = nvmf_tcp_stop_listen,
2945 	.accept = nvmf_tcp_accept,
2946 
2947 	.listener_discover = nvmf_tcp_discover,
2948 
2949 	.poll_group_create = nvmf_tcp_poll_group_create,
2950 	.get_optimal_poll_group = nvmf_tcp_get_optimal_poll_group,
2951 	.poll_group_destroy = nvmf_tcp_poll_group_destroy,
2952 	.poll_group_add = nvmf_tcp_poll_group_add,
2953 	.poll_group_remove = nvmf_tcp_poll_group_remove,
2954 	.poll_group_poll = nvmf_tcp_poll_group_poll,
2955 
2956 	.req_free = nvmf_tcp_req_free,
2957 	.req_complete = nvmf_tcp_req_complete,
2958 
2959 	.qpair_fini = nvmf_tcp_close_qpair,
2960 	.qpair_get_local_trid = nvmf_tcp_qpair_get_local_trid,
2961 	.qpair_get_peer_trid = nvmf_tcp_qpair_get_peer_trid,
2962 	.qpair_get_listen_trid = nvmf_tcp_qpair_get_listen_trid,
2963 	.qpair_abort_request = nvmf_tcp_qpair_abort_request,
2964 };
2965 
2966 SPDK_NVMF_TRANSPORT_REGISTER(tcp, &spdk_nvmf_transport_tcp);
2967 SPDK_LOG_REGISTER_COMPONENT(nvmf_tcp)
2968